From 99b5d0f26b5138b7ab2f32ce0ea9671887df8f36 Mon Sep 17 00:00:00 2001 From: Ulises Jeremias Date: Mon, 19 Feb 2024 18:08:57 -0300 Subject: [PATCH] Initial commit --- .circleci/config.yml | 168 +++++++++ .dependencies_installed | 0 .gitignore | 162 +++++++++ LICENSE | 21 ++ README.md | 133 +++++++ contrib/CODE_REVIEW_DOCS.md | 72 ++++ contrib/CONTRIBUTING.md | 213 +++++++++++ contrib/DEVELOPMENT_WORKFLOW.md | 165 +++++++++ contrib/STYLE.md | 348 ++++++++++++++++++ docs/running_on_mainnet.md | 244 +++++++++++++ docs/running_on_staging.md | 340 ++++++++++++++++++ docs/running_on_testnet.md | 242 +++++++++++++ docs/stream_tutorial/README.md | 490 ++++++++++++++++++++++++++ docs/stream_tutorial/client.py | 104 ++++++ docs/stream_tutorial/config.py | 122 +++++++ docs/stream_tutorial/miner.py | 402 +++++++++++++++++++++ docs/stream_tutorial/protocol.py | 154 ++++++++ docs/what_are_subnets.md | 27 ++ min_compute.yml | 87 +++++ neurons/__init__.py | 0 neurons/miner.py | 160 +++++++++ neurons/validator.py | 69 ++++ requirements.txt | 2 + scripts/check_compatibility.sh | 76 ++++ scripts/check_requirements_changes.sh | 10 + scripts/install_staging.sh | 145 ++++++++ setup.py | 96 +++++ subnet_links.json | 136 +++++++ template/__init__.py | 40 +++ template/base/__init__.py | 0 template/base/miner.py | 222 ++++++++++++ template/base/neuron.py | 175 +++++++++ template/base/validator.py | 362 +++++++++++++++++++ template/mock.py | 121 +++++++ template/protocol.py | 76 ++++ template/utils/__init__.py | 3 + template/utils/config.py | 249 +++++++++++++ template/utils/misc.py | 112 ++++++ template/utils/uids.py | 63 ++++ template/validator/__init__.py | 2 + template/validator/forward.py | 61 ++++ template/validator/reward.py | 54 +++ tests/__init__.py | 0 tests/helpers.py | 181 ++++++++++ tests/test_mock.py | 92 +++++ tests/test_template_validator.py | 114 ++++++ 46 files changed, 6115 insertions(+) create mode 100644 .circleci/config.yml create mode 100644 .dependencies_installed 
create mode 100644 .gitignore create mode 100644 LICENSE create mode 100644 README.md create mode 100644 contrib/CODE_REVIEW_DOCS.md create mode 100644 contrib/CONTRIBUTING.md create mode 100644 contrib/DEVELOPMENT_WORKFLOW.md create mode 100644 contrib/STYLE.md create mode 100644 docs/running_on_mainnet.md create mode 100644 docs/running_on_staging.md create mode 100644 docs/running_on_testnet.md create mode 100644 docs/stream_tutorial/README.md create mode 100644 docs/stream_tutorial/client.py create mode 100644 docs/stream_tutorial/config.py create mode 100644 docs/stream_tutorial/miner.py create mode 100644 docs/stream_tutorial/protocol.py create mode 100644 docs/what_are_subnets.md create mode 100644 min_compute.yml create mode 100644 neurons/__init__.py create mode 100644 neurons/miner.py create mode 100644 neurons/validator.py create mode 100644 requirements.txt create mode 100755 scripts/check_compatibility.sh create mode 100755 scripts/check_requirements_changes.sh create mode 100644 scripts/install_staging.sh create mode 100644 setup.py create mode 100644 subnet_links.json create mode 100644 template/__init__.py create mode 100644 template/base/__init__.py create mode 100644 template/base/miner.py create mode 100644 template/base/neuron.py create mode 100644 template/base/validator.py create mode 100644 template/mock.py create mode 100644 template/protocol.py create mode 100644 template/utils/__init__.py create mode 100644 template/utils/config.py create mode 100644 template/utils/misc.py create mode 100644 template/utils/uids.py create mode 100644 template/validator/__init__.py create mode 100644 template/validator/forward.py create mode 100644 template/validator/reward.py create mode 100644 tests/__init__.py create mode 100644 tests/helpers.py create mode 100644 tests/test_mock.py create mode 100644 tests/test_template_validator.py diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 0000000..0473afe --- /dev/null +++ 
b/.circleci/config.yml @@ -0,0 +1,168 @@ +version: 2.1 + +orbs: + python: circleci/python@2.1.1 + python-lib: dialogue/python-lib@0.1.55 + # coveralls: coveralls/coveralls@1.0.6 + +jobs: + black: + resource_class: small + parameters: + python-version: + type: string + docker: + - image: cimg/python:<< parameters.python-version >> + + steps: + - checkout + + - restore_cache: + name: Restore cached black venv + keys: + - v1-pypi-py-black-<< parameters.python-version >> + + - run: + name: Update & Activate black venv + command: | + python -m venv env/ + . env/bin/activate + python -m pip install --upgrade pip + pip install black + + - save_cache: + name: Save cached black venv + paths: + - "env/" + key: v1-pypi-py-black-<< parameters.python-version >> + + - run: + name: Black format check + command: | + . env/bin/activate + black --line-length 79 --exclude '(env|venv|.eggs)' --check . + + pylint: + resource_class: small + parameters: + python-version: + type: string + docker: + - image: cimg/python:<< parameters.python-version >> + + steps: + - checkout + + - run: + name: Install Pylint + command: | + python -m venv env/ + . env/bin/activate + pip install pylint + + - run: + name: Pylint check + command: | + . env/bin/activate + pylint --fail-on=W,E,F --exit-zero ./ + + check_compatibility: + parameters: + python_version: + type: string + docker: + - image: cimg/python:3.10 + steps: + - checkout + - run: + name: Check if requirements files have changed + command: ./scripts/check_requirements_changes.sh + - run: + name: Install dependencies and Check compatibility + command: | + if [ "$REQUIREMENTS_CHANGED" == "true" ]; then + sudo apt-get update + sudo apt-get install -y jq curl + ./scripts/check_compatibility.sh << parameters.python_version >> + else + echo "Skipping compatibility checks..." 
+ fi + + build: + resource_class: medium + parallelism: 2 + parameters: + python-version: + type: string + docker: + - image: cimg/python:<< parameters.python-version >> + + steps: + - checkout + + - restore_cache: + name: Restore cached venv + keys: + - v1-pypi-py<< parameters.python-version >>-{{ checksum "requirements.txt" }} + - v1-pypi-py<< parameters.python-version >> + + - run: + name: Update & Activate venv + command: | + python -m venv env/ + . env/bin/activate + python -m pip install --upgrade pip + + - save_cache: + name: Save cached venv + paths: + - "env/" + key: v1-pypi-py<< parameters.python-version >>-{{ checksum "requirements.txt" }} + + - run: + name: Install Bittensor Subnet Template + command: | + . env/bin/activate + pip install -e . + + - store_test_results: + path: test-results + - store_artifacts: + path: test-results + + coveralls: + docker: + - image: cimg/python:3.10 + steps: + - run: + name: Combine Coverage + command: | + pip3 install --upgrade coveralls + coveralls --finish --rcfile .coveragerc || echo "Failed to upload coverage" + +workflows: + compatibility_checks: + jobs: + - check_compatibility: + python_version: "3.8" + name: check-compatibility-3.8 + - check_compatibility: + python_version: "3.9" + name: check-compatibility-3.9 + - check_compatibility: + python_version: "3.10" + name: check-compatibility-3.10 + - check_compatibility: + python_version: "3.11" + name: check-compatibility-3.11 + + pr-requirements: + jobs: + - black: + python-version: "3.8.12" + - pylint: + python-version: "3.8.12" + - build: + matrix: + parameters: + python-version: ["3.9.13", "3.10.6", "3.11.4"] diff --git a/.dependencies_installed b/.dependencies_installed new file mode 100644 index 0000000..e69de29 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..de6d529 --- /dev/null +++ b/.gitignore @@ -0,0 +1,162 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / 
packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
+#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ + +testing/ \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..7562375 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Opentensor + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 0000000..cf86ff7 --- /dev/null +++ b/README.md @@ -0,0 +1,133 @@ +
+ +# **Bittensor Subnet Template** +[![Discord Chat](https://img.shields.io/discord/308323056592486420.svg)](https://discord.gg/bittensor) +[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) + +--- + +## The Incentivized Internet + +[Discord](https://discord.gg/bittensor) • [Network](https://taostats.io/) • [Research](https://bittensor.com/whitepaper) +
+ +--- +- [Quickstarter template](#quickstarter-template) +- [Introduction](#introduction) + - [Example](#example) +- [Installation](#installation) + - [Before you proceed](#before-you-proceed) + - [Install](#install) +- [Writing your own incentive mechanism](#writing-your-own-incentive-mechanism) +- [Subnet Links](#subnet-links) +- [License](#license) + +--- +## Quickstarter template + +This template contains all the required installation instructions, scripts, and files and functions for: +- Building Bittensor subnets. +- Creating custom incentive mechanisms and running these mechanisms on the subnets. + +In order to simplify the building of subnets, this template abstracts away the complexity of the underlying blockchain and other boilerplate code. While the default behavior of the template is sufficient for a simple subnet, you should customize the template in order to meet your specific requirements. +--- + +## Introduction + +**IMPORTANT**: If you are new to Bittensor subnets, read this section before proceeding to [Installation](#installation) section. + +The Bittensor blockchain hosts multiple self-contained incentive mechanisms called **subnets**. Subnets are playing fields in which: +- Subnet miners who produce value, and +- Subnet validators who produce consensus + +determine together the proper distribution of TAO for the purpose of incentivizing the creation of value, i.e., generating digital commodities, such as intelligence or data. + +Each subnet consists of: +- Subnet miners and subnet validators. +- A protocol using which the subnet miners and subnet validators interact with one another. This protocol is part of the incentive mechanism. +- The Bittensor API using which the subnet miners and subnet validators interact with Bittensor's onchain consensus engine [Yuma Consensus](https://bittensor.com/documentation/validating/yuma-consensus). 
The Yuma Consensus is designed to drive these actors: subnet validators and subnet miners, into agreement on who is creating value and what that value is worth. + +This starter template is split into three primary files. To write your own incentive mechanism, you should edit these files. These files are: +1. `template/protocol.py`: Contains the definition of the protocol used by subnet miners and subnet validators. +2. `neurons/miner.py`: Script that defines the subnet miner's behavior, i.e., how the subnet miner responds to requests from subnet validators. +3. `neurons/validator.py`: This script defines the subnet validator's behavior, i.e., how the subnet validator requests information from the subnet miners and determines the scores. + +### Example + +The Bittensor Subnet 1 for Text Prompting is built using this template. See [Bittensor Text-Prompting](https://github.com/opentensor/text-prompting) for how to configure the files and how to add monitoring and telemetry and support multiple miner types. Also see this Subnet 1 in action on [Taostats](https://taostats.io/subnets/netuid-1/) explorer. + +--- + +## Installation + +### Before you proceed +Before you proceed with the installation of the subnet, note the following: + +- Use these instructions to run your subnet locally for your development and testing, or on Bittensor testnet or on Bittensor mainnet. +- **IMPORTANT**: We **strongly recommend** that you first run your subnet locally and complete your development and testing before running the subnet on Bittensor testnet. Furthermore, make sure that you next run your subnet on Bittensor testnet before running it on the Bittensor mainnet. +- You can run your subnet either as a subnet owner, or as a subnet validator or as a subnet miner. +- **IMPORTANT:** Make sure you are aware of the minimum compute requirements for your subnet. See the [Minimum compute YAML configuration](./min_compute.yml). 
+- Note that installation instructions differ based on your situation: For example, installing for local development and testing will require a few additional steps compared to installing for testnet. Similarly, installation instructions differ for a subnet owner vs a validator or a miner. + +### Install + +- **Running locally**: Follow the step-by-step instructions described in this section: [Running Subnet Locally](./docs/running_on_staging.md). +- **Running on Bittensor testnet**: Follow the step-by-step instructions described in this section: [Running on the Test Network](./docs/running_on_testnet.md). +- **Running on Bittensor mainnet**: Follow the step-by-step instructions described in this section: [Running on the Main Network](./docs/running_on_mainnet.md). + +--- + +## Writing your own incentive mechanism + +As described in [Quickstarter template](#quickstarter-template) section above, when you are ready to write your own incentive mechanism, update this template repository by editing the following files. The code in these files contains detailed documentation on how to update the template. Read the documentation in each of the files to understand how to update the template. There are multiple **TODO**s in each of the files identifying sections you should update. These files are: +- `template/protocol.py`: Contains the definition of the wire-protocol used by miners and validators. +- `neurons/miner.py`: Script that defines the miner's behavior, i.e., how the miner responds to requests from validators. +- `neurons/validator.py`: This script defines the validator's behavior, i.e., how the validator requests information from the miners and determines the scores. +- `template/forward.py`: Contains the definition of the validator's forward pass. +- `template/reward.py`: Contains the definition of how validators reward miner responses. 
+ +In addition to the above files, you should also update the following files: +- `README.md`: This file contains the documentation for your project. Update this file to reflect your project's documentation. +- `CONTRIBUTING.md`: This file contains the instructions for contributing to your project. Update this file to reflect your project's contribution guidelines. +- `template/__init__.py`: This file contains the version of your project. +- `setup.py`: This file contains the metadata about your project. Update this file to reflect your project's metadata. +- `docs/`: This directory contains the documentation for your project. Update this directory to reflect your project's documentation. + +__Note__ +The `template` directory should also be renamed to your project name. +--- + +# Subnet Links +In order to see real-world examples of subnets in-action, see the `subnet_links.json` document or access them from inside the `template` package by: +```python +import template +template.SUBNET_LINKS +[{'name': 'sn0', 'url': ''}, + {'name': 'sn1', 'url': 'https://github.com/opentensor/text-prompting/'}, + {'name': 'sn2', 'url': 'https://github.com/bittranslateio/bittranslate/'}, + {'name': 'sn3', 'url': 'https://github.com/gitphantomman/scraping_subnet/'}, + {'name': 'sn4', 'url': 'https://github.com/manifold-inc/targon/'}, +... +] +``` + +## License +This repository is licensed under the MIT License. 
+```text +# The MIT License (MIT) +# Copyright © 2023 Yuma Rao + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. + +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. +``` diff --git a/contrib/CODE_REVIEW_DOCS.md b/contrib/CODE_REVIEW_DOCS.md new file mode 100644 index 0000000..9909606 --- /dev/null +++ b/contrib/CODE_REVIEW_DOCS.md @@ -0,0 +1,72 @@ +# Code Review +### Conceptual Review + +A review can be a conceptual review, where the reviewer leaves a comment + * `Concept (N)ACK`, meaning "I do (not) agree with the general goal of this pull + request", + * `Approach (N)ACK`, meaning `Concept ACK`, but "I do (not) agree with the + approach of this change". + +A `NACK` needs to include a rationale why the change is not worthwhile. +NACKs without accompanying reasoning may be disregarded. +After conceptual agreement on the change, code review can be provided. A review +begins with `ACK BRANCH_COMMIT`, where `BRANCH_COMMIT` is the top of the PR +branch, followed by a description of how the reviewer did the review. 
The +following language is used within pull request comments: + + - "I have tested the code", involving change-specific manual testing in + addition to running the unit, functional, or fuzz tests, and in case it is + not obvious how the manual testing was done, it should be described; + - "I have not tested the code, but I have reviewed it and it looks + OK, I agree it can be merged"; + - A "nit" refers to a trivial, often non-blocking issue. + +### Code Review +Project maintainers reserve the right to weigh the opinions of peer reviewers +using common sense judgement and may also weigh based on merit. Reviewers that +have demonstrated a deeper commitment and understanding of the project over time +or who have clear domain expertise may naturally have more weight, as one would +expect in all walks of life. + +Where a patch set affects consensus-critical code, the bar will be much +higher in terms of discussion and peer review requirements, keeping in mind that +mistakes could be very costly to the wider community. This includes refactoring +of consensus-critical code. + +Where a patch set proposes to change the Bittensor consensus, it must have been +discussed extensively on the discord server and other channels, be accompanied by a widely +discussed BIP and have a generally widely perceived technical consensus of being +a worthwhile change based on the judgement of the maintainers. + +### Finding Reviewers + +As most reviewers are themselves developers with their own projects, the review +process can be quite lengthy, and some amount of patience is required. If you find +that you've been waiting for a pull request to be given attention for several +months, there may be a number of reasons for this, some of which you can do something +about: + + - It may be because of a feature freeze due to an upcoming release. During this time, + only bug fixes are taken into consideration. 
If your pull request is a new feature, + it will not be prioritized until after the release. Wait for the release. + - It may be because the changes you are suggesting do not appeal to people. Rather than + nits and critique, which require effort and means they care enough to spend time on your + contribution, thundering silence is a good sign of widespread (mild) dislike of a given change + (because people don't assume *others* won't actually like the proposal). Don't take + that personally, though! Instead, take another critical look at what you are suggesting + and see if it: changes too much, is too broad, doesn't adhere to the + [developer notes](DEVELOPMENT_WORKFLOW.md), is dangerous or insecure, is messily written, etc. + Identify and address any of the issues you find. Then ask e.g. on IRC if someone could give + their opinion on the concept itself. + - It may be because your code is too complex for all but a few people, and those people + may not have realized your pull request even exists. A great way to find people who + are qualified and care about the code you are touching is the + [Git Blame feature](https://docs.github.com/en/github/managing-files-in-a-repository/managing-files-on-github/tracking-changes-in-a-file). Simply + look up who last modified the code you are changing and see if you can find + them and give them a nudge. Don't be incessant about the nudging, though. + - Finally, if all else fails, ask on IRC or elsewhere for someone to give your pull request + a look. If you think you've been waiting for an unreasonably long time (say, + more than a month) for no particular reason (a few lines changed, etc.), + this is totally fine. Try to return the favor when someone else is asking + for feedback on their code, and the universe balances out. + - Remember that the best thing you can do while waiting is give review to others! 
\ No newline at end of file diff --git a/contrib/CONTRIBUTING.md b/contrib/CONTRIBUTING.md new file mode 100644 index 0000000..ba33ce3 --- /dev/null +++ b/contrib/CONTRIBUTING.md @@ -0,0 +1,213 @@ +# Contributing to Bittensor Subnet Development + +The following is a set of guidelines for contributing to the Bittensor ecosystem. These are **HIGHLY RECOMMENDED** guidelines, but not hard-and-fast rules. Use your best judgment, and feel free to propose changes to this document in a pull request. + +## Table Of Contents +1. [How Can I Contribute?](#how-can-i-contribute) + 1. [Communication Channels](#communication-channels) + 1. [Code Contribution General Guideline](#code-contribution-general-guidelines) + 1. [Pull Request Philosophy](#pull-request-philosophy) + 1. [Pull Request Process](#pull-request-process) + 1. [Addressing Feedback](#addressing-feedback) + 1. [Squashing Commits](#squashing-commits) + 1. [Refactoring](#refactoring) + 1. [Peer Review](#peer-review) + 1. [Suggesting Features](#suggesting-enhancements-and-features) + + +## How Can I Contribute? +TODO(developer): Define your desired contribution procedure. + +## Communication Channels +TODO(developer): Place your communication channels here + +> Please follow the Bittensor Subnet [style guide](./STYLE.md) regardless of your contribution type. + +Here is a high-level summary: +- Code consistency is crucial; adhere to established programming language conventions. +- Use `black` to format your Python code; it ensures readability and consistency. +- Write concise Git commit messages; summarize changes in ~50 characters. +- Follow these six commit rules: + - Atomic Commits: Focus on one task or fix per commit. + - Subject and Body Separation: Use a blank line to separate the subject from the body. + - Subject Line Length: Keep it under 50 characters for readability. + - Imperative Mood: Write subject line as if giving a command or instruction. + - Body Text Width: Wrap text manually at 72 characters. 
+ - Body Content: Explain what changed and why, not how. +- Make use of your commit messages to simplify project understanding and maintenance. + +> For clear examples of each of the commit rules, see the style guide's [rules](./STYLE.md#the-six-rules-of-a-great-commit) section. + +### Code Contribution General Guidelines + +> Review the Bittensor Subnet [style guide](./STYLE.md) and [development workflow](./DEVELOPMENT_WORKFLOW.md) before contributing. + + +#### Pull Request Philosophy + +Patchsets and enhancements should always be focused. A pull request could add a feature, fix a bug, or refactor code, but it should not contain a mixture of these. Please also avoid 'super' pull requests which attempt to do too much, are overly large, or overly complex as this makes review difficult. + +Specifically, pull requests must adhere to the following criteria: +- Contain fewer than 50 files. PRs with more than 50 files will be closed. +- If a PR introduces a new feature, it *must* include corresponding tests. +- Other PRs (bug fixes, refactoring, etc.) should ideally also have tests, as they provide proof of concept and prevent regression. +- Categorize your PR properly by using GitHub labels. This aids in the review process by informing reviewers about the type of change at a glance. +- Make sure your code includes adequate comments. These should explain why certain decisions were made and how your changes work. +- If your changes are extensive, consider breaking your PR into smaller, related PRs. This makes your contributions easier to understand and review. +- Be active in the discussion about your PR. Respond promptly to comments and questions to help reviewers understand your changes and speed up the acceptance process. + +Generally, all pull requests must: + + - Have a clear use case, fix a demonstrable bug or serve the greater good of the project (e.g. refactoring for modularisation). + - Be well peer-reviewed. + - Follow code style guidelines. 
+ - Not break the existing test suite. + - Where bugs are fixed, where possible, there should be unit tests demonstrating the bug and also proving the fix. + - Change relevant comments and documentation when behaviour of code changes. + +#### Pull Request Process + +Please follow these steps to have your contribution considered by the maintainers: + +*Before* creating the PR: +1. Read the [development workflow](./DEVELOPMENT_WORKFLOW.md) defined for this repository to understand our workflow. +2. Ensure your PR meets the criteria stated in the 'Pull Request Philosophy' section. +3. Include relevant tests for any fixed bugs or new features as stated in the [testing guide](./TESTING.md). +4. Ensure your commit messages are clear and concise. Include the issue number if applicable. +5. If you have multiple commits, rebase them into a single commit using `git rebase -i`. +6. Explain what your changes do and why you think they should be merged in the PR description consistent with the [style guide](./STYLE.md). + +*After* creating the PR: +1. Verify that all [status checks](https://help.github.com/articles/about-status-checks/) are passing after you submit your pull request. +2. Label your PR using GitHub's labeling feature. The labels help categorize the PR and streamline the review process. +3. Document your code with comments that provide a clear understanding of your changes. Explain any non-obvious parts of your code or design decisions you've made. +4. If your PR has extensive changes, consider splitting it into smaller, related PRs. This reduces the cognitive load on the reviewers and speeds up the review process. + +Please be responsive and participate in the discussion on your PR! This aids in clarifying any confusion or concerns and leads to quicker resolution and merging of your PR. + +> Note: If your changes are not ready for merge but you want feedback, create a draft pull request. 
+ +Following these criteria will aid in quicker review and potential merging of your PR. +While the prerequisites above must be satisfied prior to having your pull request reviewed, the reviewer(s) may ask you to complete additional design work, tests, or other changes before your pull request can be ultimately accepted. + +When you are ready to submit your changes, create a pull request: + +> **Always** follow the [style guide](./STYLE.md) and [development workflow](./DEVELOPMENT_WORKFLOW.md) before submitting pull requests. + +After you submit a pull request, it will be reviewed by the maintainers. They may ask you to make changes. Please respond to any comments and push your changes as a new commit. + +> Note: Be sure to merge the latest from "upstream" before making a pull request: + +```bash +git remote add upstream https://github.com/opentensor/bittensor.git # TODO(developer): replace with your repo URL +git fetch upstream +git merge upstream/ +git push origin +``` + +#### Addressing Feedback + +After submitting your pull request, expect comments and reviews from other contributors. You can add more commits to your pull request by committing them locally and pushing to your fork. + +You are expected to reply to any review comments before your pull request is merged. You may update the code or reject the feedback if you do not agree with it, but you should express so in a reply. If there is outstanding feedback and you are not actively working on it, your pull request may be closed. + +#### Squashing Commits + +If your pull request contains fixup commits (commits that change the same line of code repeatedly) or too fine-grained commits, you may be asked to [squash](https://git-scm.com/docs/git-rebase#_interactive_mode) your commits before it will be reviewed. The basic squashing workflow is shown below. + + git checkout your_branch_name + git rebase -i HEAD~n + # n is normally the number of commits in the pull request. 
+ # Set commits (except the one in the first line) from 'pick' to 'squash', save and quit. + # On the next screen, edit/refine commit messages. + # Save and quit. + git push -f # (force push to GitHub) + +Please update the resulting commit message, if needed. It should read as a coherent message. In most cases, this means not just listing the interim commits. + +If your change contains a merge commit, the above workflow may not work and you will need to remove the merge commit first. See the next section for details on how to rebase. + +Please refrain from creating several pull requests for the same change. Use the pull request that is already open (or was created earlier) to amend changes. This preserves the discussion and review that happened earlier for the respective change set. + +The length of time required for peer review is unpredictable and will vary from pull request to pull request. + +#### Refactoring + +Refactoring is a necessary part of any software project's evolution. The following guidelines cover refactoring pull requests for the project. + +There are three categories of refactoring: code-only moves, code style fixes, and code refactoring. In general, refactoring pull requests should not mix these three kinds of activities in order to make refactoring pull requests easy to review and uncontroversial. In all cases, refactoring PRs must not change the behaviour of code within the pull request (bugs must be preserved as is). + +Project maintainers aim for a quick turnaround on refactoring pull requests, so where possible keep them short, uncomplex and easy to verify. + +Pull requests that refactor the code should not be made by new contributors. It requires a certain level of experience to know where the code belongs to and to understand the full ramification (including rebase effort of open pull requests). 
Trivial pull requests or pull requests that refactor the code with no clear benefits may be immediately closed by the maintainers to reduce unnecessary workload on reviewing. + +#### Peer Review + +Anyone may participate in peer review which is expressed by comments in the pull request. Typically reviewers will review the code for obvious errors, as well as test out the patch set and opine on the technical merits of the patch. Project maintainers take into account the peer review when determining if there is consensus to merge a pull request (remember that discussions may have taken place elsewhere, not just on GitHub). The following language is used within pull-request comments: + +- ACK means "I have tested the code and I agree it should be merged"; +- NACK means "I disagree this should be merged", and must be accompanied by sound technical justification. NACKs without accompanying reasoning may be disregarded; +- utACK means "I have not tested the code, but I have reviewed it and it looks OK, I agree it can be merged"; +- Concept ACK means "I agree in the general principle of this pull request"; +- Nit refers to trivial, often non-blocking issues. + +Reviewers should include the commit(s) they have reviewed in their comments. This can be done by copying the commit SHA1 hash. + +A pull request that changes consensus-critical code is considerably more involved than a pull request that adds a feature to the wallet, for example. Such patches must be reviewed and thoroughly tested by several reviewers who are knowledgeable about the changed subsystems. Where new features are proposed, it is helpful for reviewers to try out the patch set on a test network and indicate that they have done so in their review. Project maintainers will take this into consideration when merging changes. + +For a more detailed description of the review process, see the [Code Review Guidelines](CODE_REVIEW_DOCS.md). 
+ +> **Note:** If you find a **Closed** issue that seems like it is the same thing that you're experiencing, open a new issue and include a link to the original issue in the body of your new one. + +#### How Do I Submit A (Good) Bug Report? + +Please track bugs as GitHub issues. + +Explain the problem and include additional details to help maintainers reproduce the problem: + +* **Use a clear and descriptive title** for the issue to identify the problem. +* **Describe the exact steps which reproduce the problem** in as many details as possible. For example, start by explaining how you started the application, e.g. which command exactly you used in the terminal, or how you started Bittensor otherwise. When listing steps, **don't just say what you did, but explain how you did it**. For example, if you ran with a set of custom configs, explain if you used a config file or command line arguments. +* **Provide specific examples to demonstrate the steps**. Include links to files or GitHub projects, or copy/pasteable snippets, which you use in those examples. If you're providing snippets in the issue, use [Markdown code blocks](https://help.github.com/articles/markdown-basics/#multiple-lines). +* **Describe the behavior you observed after following the steps** and point out what exactly is the problem with that behavior. +* **Explain which behavior you expected to see instead and why.** +* **Include screenshots and animated GIFs** which show you following the described steps and clearly demonstrate the problem. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. +* **If you're reporting that Bittensor crashed**, include a crash report with a stack trace from the operating system. On macOS, the crash report will be available in `Console.app` under "Diagnostic and usage information" > "User diagnostic reports". 
Include the crash report in the issue in a [code block](https://help.github.com/articles/markdown-basics/#multiple-lines), a [file attachment](https://help.github.com/articles/file-attachments-on-issues-and-pull-requests/), or put it in a [gist](https://gist.github.com/) and provide a link to that gist.
+ +### Suggesting Enhancements and Features + +This section guides you through submitting an enhancement suggestion, including completely new features and minor improvements to existing functionality. Following these guidelines helps maintainers and the community understand your suggestion :pencil: and find related suggestions :mag_right:. + +When you are creating an enhancement suggestion, please [include as many details as possible](#how-do-i-submit-a-good-enhancement-suggestion). Fill in [the template](https://bit.ly/atom-behavior-pr), including the steps that you imagine you would take if the feature you're requesting existed. + +#### Before Submitting An Enhancement Suggestion + +* **Check the [debugging guide](./DEBUGGING.md).** for tips — you might discover that the enhancement is already available. Most importantly, check if you're using the latest version of the project first. + +#### How Submit A (Good) Feature Suggestion + +* **Use a clear and descriptive title** for the issue to identify the problem. +* **Provide a step-by-step description of the suggested enhancement** in as many details as possible. +* **Provide specific examples to demonstrate the steps**. Include copy/pasteable snippets which you use in those examples, as [Markdown code blocks](https://help.github.com/articles/markdown-basics/#multiple-lines). +* **Describe the current behavior** and **explain which behavior you expected to see instead** and why. +* **Include screenshots and animated GIFs** which help you demonstrate the steps or point out the part of the project which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. +* **Explain why this enhancement would be useful** to most users. 
+* **List some other text editors or applications where this enhancement exists.** +* **Specify the name and version of the OS you're using.** + +Thank you for considering contributing to Bittensor! Any help is greatly appreciated along this journey to incentivize open and permissionless intelligence. diff --git a/contrib/DEVELOPMENT_WORKFLOW.md b/contrib/DEVELOPMENT_WORKFLOW.md new file mode 100644 index 0000000..13bb07b --- /dev/null +++ b/contrib/DEVELOPMENT_WORKFLOW.md @@ -0,0 +1,165 @@ +# Bittensor Subnet Development Workflow + +This is a highly advisable workflow to follow to keep your subtensor project organized and foster ease of contribution. + +## Table of contents + +- [Bittensor Subnet Development Workflow](#bittensor-subnet-development-workflow) + - [Main Branches](#main-branches) + - [Development Model](#development-model) + - [Feature Branches](#feature-branches) + - [Release Branches](#release-branches) + - [Hotfix Branches](#hotfix-branches) + - [Git Operations](#git-operations) + - [Creating a Feature Branch](#creating-a-feature-branch) + - [Merging Feature Branch into Staging](#merging-feature-branch-into-staging) + - [Creating a Release Branch](#creating-a-release-branch) + - [Finishing a Release Branch](#finishing-a-release-branch) + - [Creating a Hotfix Branch](#creating-a-hotfix-branch) + - [Finishing a Hotfix Branch](#finishing-a-hotfix-branch) + - [Continuous Integration (CI) and Continuous Deployment (CD)](#continuous-integration-ci-and-continuous-deployment-cd) + - [Versioning and Release Notes](#versioning-and-release-notes) + - [Pending Tasks](#pending-tasks) + +## Main Branches + +Bittensor's codebase consists of two main branches: **main** and **staging**. + +**main** +- This is Bittensor's live production branch, which should only be updated by the core development team. This branch is protected, so refrain from pushing or merging into it unless authorized. 
+ +**staging** +- This branch is continuously updated and is where you propose and merge changes. It's essentially Bittensor's active development branch. + +## Development Model + +### Feature Branches + +- Branch off from: `staging` +- Merge back into: `staging` +- Naming convention: `feature//` + +Feature branches are used to develop new features for upcoming or future releases. They exist as long as the feature is in development, but will eventually be merged into `staging` or discarded. Always delete your feature branch after merging to avoid unnecessary clutter. + +### Release Branches + +- Branch off from: `staging` +- Merge back into: `staging` and then `main` +- Naming convention: `release///` + +Release branches support the preparation of a new production release, allowing for minor bug fixes and preparation of metadata (version number, configuration, etc). All new features should be merged into `staging` and wait for the next big release. + +### Hotfix Branches + +General workflow: + +- Branch off from: `main` or `staging` +- Merge back into: `staging` then `main` +- Naming convention: `hotfix///` + +Hotfix branches are meant for quick fixes in the production environment. When a critical bug in a production version must be resolved immediately, a hotfix branch is created. + +## Git Operations + +#### Create a feature branch + +1. Branch from the **staging** branch. + 1. Command: `git checkout -b feature/my-feature staging` + +> Rebase frequently with the updated staging branch so you do not face big conflicts before submitting your pull request. Remember, syncing your changes with other developers could also help you avoid big conflicts. + +#### Merge feature branch into staging + +In other words, integrate your changes into a branch that will be tested and prepared for release. + +1. Switch branch to staging: `git checkout staging` +2. Merging feature branch into staging: `git merge --no-ff feature/my-feature` +3. 
Pushing changes to staging: `git push origin staging` +4. Delete feature branch: `git branch -d feature/my-feature` (alternatively, this can be navigated on the GitHub web UI) + +This operation is done by Github when merging a PR. + +So, what you have to keep in mind is: +- Open the PR against the `staging` branch. +- After merging a PR you should delete your feature branch. This will be strictly enforced. + +#### Creating a release branch + +1. Create branch from staging: `git checkout -b release/3.4.0/descriptive-message/creator's_name staging` +2. Updating version with major or minor: `./scripts/update_version.sh major|minor` +3. Commit file changes with new version: `git commit -a -m "Updated version to 3.4.0"` + + +#### Finishing a Release Branch + +This involves releasing stable code and generating a new version for bittensor. + +1. Switch branch to main: `git checkout main` +2. Merge release branch into main: `git merge --no-ff release/3.4.0/optional-descriptive-message` +3. Tag changeset: `git tag -a v3.4.0 -m "Releasing v3.4.0: some comment about it"` +4. Push changes to main: `git push origin main` +5. Push tags to origin: `git push origin --tags` + +To keep the changes made in the __release__ branch, we need to merge those back into `staging`: + +- Switch branch to staging: `git checkout staging`. +- Merging release branch into staging: `git merge --no-ff release/3.4.0/optional-descriptive-message` + +This step may well lead to a merge conflict (probably even, since we have changed the version number). If so, fix it and commit. + + +#### Creating a hotfix branch +1. Create branch from main: `git checkout -b hotfix/3.3.4/descriptive-message/creator's-name main` +2. Update patch version: `./scripts/update_version.sh patch` +3. Commit file changes with new version: `git commit -a -m "Updated version to 3.3.4"` +4. 
Fix the bug and commit the fix: `git commit -m "Fixed critical production issue X"` + +#### Finishing a Hotfix Branch + +Finishing a hotfix branch involves merging the bugfix into both `main` and `staging`. + +1. Switch branch to main: `git checkout main` +2. Merge hotfix into main: `git merge --no-ff hotfix/3.3.4/optional-descriptive-message` +3. Tag new version: `git tag -a v3.3.4 -m "Releasing v3.3.4: descriptive comment about the hotfix"` +4. Push changes to main: `git push origin main` +5. Push tags to origin: `git push origin --tags` +6. Switch branch to staging: `git checkout staging` +7. Merge hotfix into staging: `git merge --no-ff hotfix/3.3.4/descriptive-message/creator's-name` +8. Push changes to origin/staging: `git push origin staging` +9. Delete hotfix branch: `git branch -d hotfix/3.3.4/optional-descriptive-message` + +The one exception to the rule here is that, **when a release branch currently exists, the hotfix changes need to be merged into that release branch, instead of** `staging`. Back-merging the bugfix into the __release__ branch will eventually result in the bugfix being merged into `develop` too, when the release branch is finished. (If work in develop immediately requires this bugfix and cannot wait for the release branch to be finished, you may safely merge the bugfix into develop now already as well.) + +Finally, we remove the temporary branch: + +- `git branch -d hotfix/3.3.4/optional-descriptive-message` +## Continuous Integration (CI) and Continuous Deployment (CD) + +Continuous Integration (CI) is a software development practice where members of a team integrate their work frequently. Each integration is verified by an automated build and test process to detect integration errors as quickly as possible. + +Continuous Deployment (CD) is a software engineering approach in which software functionalities are delivered frequently through automated deployments. 
+ +- **CircleCI job**: Create jobs in CircleCI to automate the merging of staging into main and release version (needed to release code) and building and testing Bittensor (needed to merge PRs). + +> It is highly recommended to set up your own circleci pipeline with your subnet + +## Versioning and Release Notes + +Semantic versioning helps keep track of the different versions of the software. When code is merged into main, generate a new version. + +Release notes provide documentation for each version released to the users, highlighting the new features, improvements, and bug fixes. When merged into main, generate GitHub release and release notes. + +## Pending Tasks + +Follow these steps when you are contributing to the bittensor subnet: + +- Determine if main and staging are different +- Determine what is in staging that is not merged yet + - Document not released developments + - When merged into staging, generate information about what's merged into staging but not released. + - When merged into main, generate GitHub release and release notes. +- CircleCI jobs + - Merge staging into main and release version (needed to release code) + - Build and Test Bittensor (needed to merge PRs) + +This document can be improved as the Bittensor project continues to develop and change. diff --git a/contrib/STYLE.md b/contrib/STYLE.md new file mode 100644 index 0000000..b7ac755 --- /dev/null +++ b/contrib/STYLE.md @@ -0,0 +1,348 @@ +# Style Guide + +A project’s long-term success rests (among other things) on its maintainability, and a maintainer has few tools more powerful than his or her project’s log. It’s worth taking the time to learn how to care for one properly. What may be a hassle at first soon becomes habit, and eventually a source of pride and productivity for all involved. + +Most programming languages have well-established conventions as to what constitutes idiomatic style, i.e. naming, formatting and so on. 
There are variations on these conventions, of course, but most developers agree that picking one and sticking to it is far better than the chaos that ensues when everybody does their own thing. + +# Table of Contents +1. [Code Style](#code-style) +2. [Naming Conventions](#naming-conventions) +3. [Git Commit Style](#git-commit-style) +4. [The Six Rules of a Great Commit](#the-six-rules-of-a-great-commit) + - [1. Atomic Commits](#1-atomic-commits) + - [2. Separate Subject from Body with a Blank Line](#2-separate-subject-from-body-with-a-blank-line) + - [3. Limit the Subject Line to 50 Characters](#3-limit-the-subject-line-to-50-characters) + - [4. Use the Imperative Mood in the Subject Line](#4-use-the-imperative-mood-in-the-subject-line) + - [5. Wrap the Body at 72 Characters](#5-wrap-the-body-at-72-characters) + - [6. Use the Body to Explain What and Why vs. How](#6-use-the-body-to-explain-what-and-why-vs-how) +5. [Tools Worth Mentioning](#tools-worth-mentioning) + - [Using `--fixup`](#using---fixup) + - [Interactive Rebase](#interactive-rebase) +6. [Pull Request and Squashing Commits Caveats](#pull-request-and-squashing-commits-caveats) + + +### Code style + +#### General Style +Python's official style guide is PEP 8, which provides conventions for writing code for the main Python distribution. Here are some key points: + +- `Indentation:` Use 4 spaces per indentation level. + +- `Line Length:` Limit all lines to a maximum of 79 characters. + +- `Blank Lines:` Surround top-level function and class definitions with two blank lines. Method definitions inside a class are surrounded by a single blank line. + +- `Imports:` Imports should usually be on separate lines and should be grouped in the following order: + + - Standard library imports. + - Related third party imports. + - Local application/library specific imports. +- `Whitespace:` Avoid extraneous whitespace in the following situations: + + - Immediately inside parentheses, brackets or braces. 
+ - Immediately before a comma, semicolon, or colon. + - Immediately before the open parenthesis that starts the argument list of a function call. +- `Comments:` Comments should be complete sentences and should be used to clarify code and are not a substitute for poorly written code. + +#### For Python + +- `List Comprehensions:` Use list comprehensions for concise and readable creation of lists. + +- `Generators:` Use generators when dealing with large amounts of data to save memory. + +- `Context Managers:` Use context managers (with statement) for resource management. + +- `String Formatting:` Use f-strings for formatting strings in Python 3.6 and above. + +- `Error Handling:` Use exceptions for error handling whenever possible. + +#### More details + +Use `black` to format your python code before commiting for consistency across such a large pool of contributors. Black's code [style](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#code-style) ensures consistent and opinionated code formatting. It automatically formats your Python code according to the Black style guide, enhancing code readability and maintainability. + +Key Features of Black: + + Consistency: Black enforces a single, consistent coding style across your project, eliminating style debates and allowing developers to focus on code logic. + + Readability: By applying a standard formatting style, Black improves code readability, making it easier to understand and collaborate on projects. + + Automation: Black automates the code formatting process, saving time and effort. It eliminates the need for manual formatting and reduces the likelihood of inconsistencies. + +### Naming Conventions + +- `Classes:` Class names should normally use the CapWords Convention. +- `Functions and Variables:` Function names should be lowercase, with words separated by underscores as necessary to improve readability. Variable names follow the same convention as function names. 
+ +- `Constants:` Constants are usually defined on a module level and written in all capital letters with underscores separating words. + +- `Non-public Methods and Instance Variables:` Use a single leading underscore (_). This is a weak "internal use" indicator. + +- `Strongly "private" methods and variables:` Use a double leading underscore (__). This triggers name mangling in Python. + + +### Git commit style + +Here’s a model Git commit message when contributing: +``` +Summarize changes in around 50 characters or less + +More detailed explanatory text, if necessary. Wrap it to about 72 +characters or so. In some contexts, the first line is treated as the +subject of the commit and the rest of the text as the body. The +blank line separating the summary from the body is critical (unless +you omit the body entirely); various tools like `log`, `shortlog` +and `rebase` can get confused if you run the two together. + +Explain the problem that this commit is solving. Focus on why you +are making this change as opposed to how (the code explains that). +Are there side effects or other unintuitive consequences of this +change? Here's the place to explain them. + +Further paragraphs come after blank lines. + + - Bullet points are okay, too + + - Typically a hyphen or asterisk is used for the bullet, preceded + by a single space, with blank lines in between, but conventions + vary here + +If you use an issue tracker, put references to them at the bottom, +like this: + +Resolves: #123 +See also: #456, #789 +``` + + +## The six rules of a great commit. + +#### 1. Atomic Commits +An “atomic” change revolves around one task or one fix. 
+ +Atomic Approach + - Commit each fix or task as a separate change + - Only commit when a block of work is complete + - Commit each layout change separately + - Joint commit for layout file, code behind file, and additional resources + +Benefits + +- Easy to roll back without affecting other changes +- Easy to make other changes on the fly +- Easy to merge features to other branches + +#### Avoid trivial commit messages + +Commit messages like "fix", "fix2", or "fix3" don't provide any context or clear understanding of what changes the commit introduces. Here are some examples of good vs. bad commit messages: + +**Bad Commit Message:** + + $ git commit -m "fix" + +**Good Commit Message:** + + $ git commit -m "Fix typo in README file" + +> **Caveat**: When working with new features, an atomic commit will often consist of multiple files, since a layout file, code behind file, and additional resources may have been added/modified. You don’t want to commit all of these separately, because if you had to roll back the application to a state before the feature was added, it would involve multiple commit entries, and that can get confusing + +#### 2. Separate subject from body with a blank line + +Not every commit requires both a subject and a body. Sometimes a single line is fine, especially when the change is so simple that no further context is necessary. + +For example: + + Fix typo in introduction to user guide + +Nothing more need be said; if the reader wonders what the typo was, she can simply take a look at the change itself, i.e. use git show or git diff or git log -p. + +If you’re committing something like this at the command line, it’s easy to use the -m option to git commit: + + $ git commit -m"Fix typo in introduction to user guide" + +However, when a commit merits a bit of explanation and context, you need to write a body. For example: + + Derezz the master control program + + MCP turned out to be evil and had become intent on world domination. 
+ This commit throws Tron's disc into MCP (causing its deresolution) + and turns it back into a chess game. + +Commit messages with bodies are not so easy to write with the -m option. You’re better off writing the message in a proper text editor. [See Pro Git](https://git-scm.com/book/en/v2/Customizing-Git-Git-Configuration). + +In any case, the separation of subject from body pays off when browsing the log. Here’s the full log entry: + + $ git log + commit 42e769bdf4894310333942ffc5a15151222a87be + Author: Kevin Flynn + Date: Fri Jan 01 00:00:00 1982 -0200 + + Derezz the master control program + + MCP turned out to be evil and had become intent on world domination. + This commit throws Tron's disc into MCP (causing its deresolution) + and turns it back into a chess game. + + +#### 3. Limit the subject line to 50 characters +50 characters is not a hard limit, just a rule of thumb. Keeping subject lines at this length ensures that they are readable, and forces the author to think for a moment about the most concise way to explain what’s going on. + +GitHub’s UI is fully aware of these conventions. It will warn you if you go past the 50 character limit. Git will truncate any subject line longer than 72 characters with an ellipsis, thus keeping it to 50 is best practice. + +#### 4. Use the imperative mood in the subject line +Imperative mood just means “spoken or written as if giving a command or instruction”. A few examples: + + Clean your room + Close the door + Take out the trash + +Each of the seven rules you’re reading about right now are written in the imperative (“Wrap the body at 72 characters”, etc.). + +The imperative can sound a little rude; that’s why we don’t often use it. But it’s perfect for Git commit subject lines. One reason for this is that Git itself uses the imperative whenever it creates a commit on your behalf. 
+ +For example, the default message created when using git merge reads: + + Merge branch 'myfeature' + +And when using git revert: + + Revert "Add the thing with the stuff" + + This reverts commit cc87791524aedd593cff5a74532befe7ab69ce9d. + +Or when clicking the “Merge” button on a GitHub pull request: + + Merge pull request #123 from someuser/somebranch + +So when you write your commit messages in the imperative, you’re following Git’s own built-in conventions. For example: + + Refactor subsystem X for readability + Update getting started documentation + Remove deprecated methods + Release version 1.0.0 + +Writing this way can be a little awkward at first. We’re more used to speaking in the indicative mood, which is all about reporting facts. That’s why commit messages often end up reading like this: + + Fixed bug with Y + Changing behavior of X + +And sometimes commit messages get written as a description of their contents: + + More fixes for broken stuff + Sweet new API methods + +To remove any confusion, here’s a simple rule to get it right every time. + +**A properly formed Git commit subject line should always be able to complete the following sentence:** + + If applied, this commit will + +For example: + + If applied, this commit will refactor subsystem X for readability + If applied, this commit will update getting started documentation + If applied, this commit will remove deprecated methods + If applied, this commit will release version 1.0.0 + If applied, this commit will merge pull request #123 from user/branch + +#### 5. Wrap the body at 72 characters +Git never wraps text automatically. When you write the body of a commit message, you must mind its right margin, and wrap text manually. + +The recommendation is to do this at 72 characters, so that Git has plenty of room to indent text while still keeping everything under 80 characters overall. + +A good text editor can help here. 
It’s easy to configure Vim, for example, to wrap text at 72 characters when you’re writing a Git commit. + +#### 6. Use the body to explain what and why vs. how +This [commit](https://github.com/bitcoin/bitcoin/commit/eb0b56b19017ab5c16c745e6da39c53126924ed6) from Bitcoin Core is a great example of explaining what changed and why: + +``` +commit eb0b56b19017ab5c16c745e6da39c53126924ed6 +Author: Pieter Wuille +Date: Fri Aug 1 22:57:55 2014 +0200 + + Simplify serialize.h's exception handling + + Remove the 'state' and 'exceptmask' from serialize.h's stream + implementations, as well as related methods. + + As exceptmask always included 'failbit', and setstate was always + called with bits = failbit, all it did was immediately raise an + exception. Get rid of those variables, and replace the setstate + with direct exception throwing (which also removes some dead + code). + + As a result, good() is never reached after a failure (there are + only 2 calls, one of which is in tests), and can just be replaced + by !eof(). + + fail(), clear(n) and exceptions() are just never called. Delete + them. +``` + +Take a look at the [full diff](https://github.com/bitcoin/bitcoin/commit/eb0b56b19017ab5c16c745e6da39c53126924ed6) and just think how much time the author is saving fellow and future committers by taking the time to provide this context here and now. If he didn’t, it would probably be lost forever. + +In most cases, you can leave out details about how a change has been made. Code is generally self-explanatory in this regard (and if the code is so complex that it needs to be explained in prose, that’s what source comments are for). Just focus on making clear the reasons why you made the change in the first place—the way things worked before the change (and what was wrong with that), the way they work now, and why you decided to solve it the way you did. + +The future maintainer that thanks you may be yourself! 
+ + + +#### Tools worth mentioning + +##### Using `--fixup` + +If you've made a commit and then realize you've missed something or made a minor mistake, you can use the `--fixup` option. + +For example, suppose you've made a commit with a hash `9fceb02`. Later, you realize you've left a debug statement in your code. Instead of making a new commit titled "remove debug statement" or "fix", you can do the following: + + $ git commit --fixup 9fceb02 + +This will create a new commit to fix the issue, with a message like "fixup! The original commit message". + +##### Interactive Rebase + +Interactive rebase, or `rebase -i`, can be used to squash these fixup commits into the original commits they're fixing, which cleans up your commit history. You can use the `autosquash` option to automatically squash any commits marked as "fixup" into their target commits. + +For example: + + $ git rebase -i --autosquash HEAD~5 + +This command starts an interactive rebase for the last 5 commits (`HEAD~5`). Any commits marked as "fixup" will be automatically moved to squash with their target commits. + +The benefit of using `--fixup` and interactive rebase is that it keeps your commit history clean and readable. It groups fixes with the commits they are related to, rather than having a separate "fix" commit that might not make sense to other developers (or even to you) in the future. + + +--- + +#### Pull Request and Squashing Commits Caveats + +While atomic commits are great for development and for understanding the changes within the branch, the commit history can get messy when merging to the main branch. To keep a cleaner and more understandable commit history in our main branch, we encourage squashing all the commits of a PR into one when merging. + +This single commit should provide an overview of the changes that the PR introduced. 
It should follow the guidelines for atomic commits (an atomic commit is complete, self-contained, and understandable) but on the scale of the entire feature, task, or fix that the PR addresses. This approach combines the benefits of atomic commits during development with a clean commit history in our main branch. + +Here is how you can squash commits: + +```bash +git rebase -i HEAD~n +``` + +where `n` is the number of commits to squash. After running the command, replace `pick` with `squash` for the commits you want to squash into the previous commit. This will combine the commits and allow you to write a new commit message. + +In this context, an atomic commit message could look like: + +``` +Add feature X + +This commit introduces feature X which does A, B, and C. It adds +new files for layout, updates the code behind the file, and introduces +new resources. This change is important because it allows users to +perform task Y more efficiently. + +It includes: +- Creation of new layout file +- Updates in the code-behind file +- Addition of new resources + +Resolves: #123 +``` + +In your PRs, remember to detail what the PR is introducing or fixing. This will be helpful for reviewers to understand the context and the reason behind the changes. diff --git a/docs/running_on_mainnet.md b/docs/running_on_mainnet.md new file mode 100644 index 0000000..38be00a --- /dev/null +++ b/docs/running_on_mainnet.md @@ -0,0 +1,244 @@ +# Running Subnet on Mainnet + +This tutorial shows how to use the bittensor `btcli` to create a subnetwork and connect your incentive mechanism to it. + +**IMPORTANT:** Before attempting to register on mainnet, we strongly recommend that you: +- First run [Running Subnet Locally](running_on_staging.md), and +- Then run [Running on the Testnet](running_on_testnet.md). + +Your incentive mechanisms running on the mainnet are open to anyone. They emit real TAO. Creating these mechanisms incur a `lock_cost` in TAO. 
+ +**DANGER** +- Do not expose your private keys. +- Only use your testnet wallet. +- Do not reuse the password of your mainnet wallet. +- Make sure your incentive mechanism is resistant to abuse. + +## Prerequisites + +Before proceeding further, make sure that you have installed Bittensor. See the below instructions: + +- [Install `bittensor`](https://github.com/opentensor/bittensor#install). + +After installing `bittensor`, proceed as below: + +## Steps + +## 1. Install your subnet template + +**NOTE: Skip this step if** you already did this during local testing and development. + +In your project directory: + +```bash +git clone https://github.com/opentensor/bittensor-subnet-template.git +``` + +Next, `cd` into `bittensor-subnet-template` repo directory: + +```bash +cd bittensor-subnet-template +``` + +Install the Bittensor subnet template package: + +```bash +python -m pip install -e . # Install your subnet template package +``` + +## 2. Create wallets + +Create wallets for subnet owner, subnet validator and for subnet miner. + +This step creates local coldkey and hotkey pairs for your three identities: subnet owner, subnet validator and subnet miner. + +The owner will create and control the subnet. The owner must have at least 100 TAO before the owner can run next steps. + +The validator and miner will be registered to the subnet created by the owner. This ensures that the validator and miner can run the respective validator and miner scripts. + +**NOTE**: You can also use existing wallets to register. Creating new keys is shown here for reference. 
+ +Create a coldkey for the owner wallet: + +```bash +btcli wallet new_coldkey --wallet.name owner +``` + +Create a coldkey and hotkey for the subnet miner wallet: +```bash +btcli wallet new_coldkey --wallet.name miner +``` + +and + +```bash +btcli wallet new_hotkey --wallet.name miner --wallet.hotkey default +``` + +Create a coldkey and hotkey for the subnet validator wallet: + +```bash +btcli wallet new_coldkey --wallet.name validator +``` + +and + +```bash +btcli wallet new_hotkey --wallet.name validator --wallet.hotkey default +``` + +## 3. Getting the price of subnet creation + +Creating subnets on mainnet is competitive. The cost is determined by the rate at which new subnets are being registered onto the Bittensor blockchain. + +By default you must have at least 100 TAO on your owner wallet to create a subnet. However, the exact amount will fluctuate based on demand. The below code shows how to get the current price of creating a subnet. + +```bash +btcli subnet lock_cost +``` + +The above command will show: + +```bash +>> Subnet lock cost: τ100.000000000 +``` + +## 4. Purchasing a slot + +Using your TAO balance, you can register your subnet to the mainchain. This will create a new subnet on the mainchain and give you the owner permissions to it. The below command shows how to purchase a slot. + +**NOTE**: Slots cost TAO to lock. You will get this TAO back when the subnet is deregistered. + +```bash +btcli subnet create +``` + +Enter the owner wallet name. This gives permissions to the coldkey. + +```bash +>> Enter wallet name (default): owner # Enter your owner wallet name +>> Enter password to unlock key: # Enter your wallet password. +>> Register subnet? [y/n]: # Select yes (y) +>> ⠇ 📡 Registering subnet... +✅ Registered subnetwork with netuid: 1 # Your subnet netuid will show here, save this for later. +``` + +## 5. 
(Optional) Register keys + +**NOTE**: While this is not enforced, we recommend subnet owners to run a subnet validator and a subnet miner on the subnet to demonstrate proper use to the community. + +This step registers your subnet validator and subnet miner keys to the subnet giving them the **first two slots** on the subnet. + +Register your miner key to the subnet: + +```bash +btcli subnet recycle_register --netuid 1 --subtensor.network finney --wallet.name miner --wallet.hotkey default +``` + +Follow the below prompts: + +```bash +>> Enter netuid [1] (1): # Enter netuid 1 to specify the subnet you just created. +>> Continue Registration? + hotkey: ... + coldkey: ... + network: finney [y/n]: # Select yes (y) +>> ✅ Registered +``` + +Next, register your validator key to the subnet: + +```bash +btcli subnet recycle_register --netuid 1 --subtensor.network finney --wallet.name validator --wallet.hotkey default +``` + +Follow the below prompts: + +```bash +>> Enter netuid [1] (1): # Enter netuid 1 to specify the subnet you just created. +>> Continue Registration? + hotkey: ... + coldkey: ... + network: finney [y/n]: # Select yes (y) +>> ✅ Registered +``` + +## 6. 
Check that your keys have been registered + +Check that your subnet validator key has been registered: + +```bash +btcli wallet overview --wallet.name validator +``` + +The output will be similar to the below: + +```bash +Subnet: 1 +COLDKEY HOTKEY UID ACTIVE STAKE(τ) RANK TRUST CONSENSUS INCENTIVE DIVIDENDS EMISSION(ρ) VTRUST VPERMIT UPDATED AXON HOTKEY_SS58 +miner default 0 True 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0 0.00000 14 none 5GTFrsEQfvTsh3WjiEVFeKzFTc2xcf… +1 1 2 τ0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 ρ0 0.00000 + Wallet balance: τ0.0 +``` + +Check that your subnet miner has been registered: + +```bash +btcli wallet overview --wallet.name miner +``` + +The output will be similar to the below: + +```bash +Subnet: 1 +COLDKEY HOTKEY UID ACTIVE STAKE(τ) RANK TRUST CONSENSUS INCENTIVE DIVIDENDS EMISSION(ρ) VTRUST VPERMIT UPDATED AXON HOTKEY_SS58 +miner default 1 True 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0 0.00000 14 none 5GTFrsEQfvTsh3WjiEVFeKzFTc2xcf… +1 1 2 τ0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 ρ0 0.00000 + Wallet balance: τ0.0 +``` + +## 7. Run subnet miner and subnet validator + +Run the subnet miner: + +```bash +python neurons/miner.py --netuid 1 --wallet.name miner --wallet.hotkey default --logging.debug +``` + +You will see the below terminal output: + +```bash +>> 2023-08-08 16:58:11.223 | INFO | Running miner for subnet: 1 on network: wss://entrypoint-finney.opentensor.ai:443 with config: ... +``` + +Run the subnet validator: + +```bash +python neurons/validator.py --netuid 1 --wallet.name validator --wallet.hotkey default --logging.debug +``` + +You will see the below terminal output: + +```bash +>> 2023-08-08 16:58:11.223 | INFO | Running validator for subnet: 1 on network: wss://entrypoint-finney.opentensor.ai:443 with config: ... +``` + +## 8. 
Get emissions flowing + +Register to the root subnet using the `btcli`: + +```bash +btcli root register +``` + +Then set your weights for the subnet: + +```bash +btcli root weights +``` + +## 9. Stopping your nodes + +To stop your nodes, press CTRL + C in the terminal where the nodes are running. + +--- \ No newline at end of file diff --git a/docs/running_on_staging.md b/docs/running_on_staging.md new file mode 100644 index 0000000..6eeb4d5 --- /dev/null +++ b/docs/running_on_staging.md @@ -0,0 +1,340 @@ +# Running Subnet Locally + +This tutorial will guide you through: + +- Setting up a local blockchain that is not connected to either Bittensor testchain or mainchain +- Creating a subnet +- Run your incentive mechanism on the subnet. + +## Local blockchain vs local subtensor node + +Running a local blockchain is sometimes synonymously referred as running on staging. This is **different** from running a local subtensor node that connects to the Bittensor mainchain. + +A local subtensor node will connect to the mainchain and sync with the mainchain, giving you your own access point to the mainchain. + +Running a local blockchain spins up two authority nodes locally, not connected to any other nodes or testchain or mainchain. This tutorial is for running a local blockchain. + +## Prerequisites + +Before proceeding further, make sure that you have installed Bittensor. See the below instructions: + +- [Install `bittensor`](https://github.com/opentensor/bittensor#install). + +After installing `bittensor`, proceed as below: + +## 1. Install Substrate dependencies + +Begin by installing the required dependencies for running a Substrate node. + +Update your system packages: + +```bash +sudo apt update +``` + +Install additional required libraries and tools + +```bash +sudo apt install --assume-yes make build-essential git clang curl libssl-dev llvm libudev-dev protobuf-compiler +``` + +## 2. 
Install Rust and Cargo + +Rust is the programming language used in Substrate development. Cargo is Rust package manager. + +Install rust and cargo: + +```bash +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +``` + +Update your shell's source to include Cargo's path: + +```bash +source "$HOME/.cargo/env" +``` + +## 3. Clone the subtensor repository + +This step fetches the subtensor codebase to your local machine. + +```bash +git clone https://github.com/opentensor/subtensor.git +``` + +## 4. Setup Rust + +This step ensures that you have the nightly toolchain and the WebAssembly (wasm) compilation target. Note that this step will run the subtensor chain on your terminal directly, hence we advise that you run this as a background process using PM2 or other software. + +Update to the nightly version of Rust: + +```bash +./subtensor/scripts/init.sh +``` + +## 5. Initialize + +These steps initialize your local subtensor chain in development mode. These commands will set up and run a local subtensor. + +Build the binary with the faucet feature enabled: + +```bash +cargo build --release --features pow-faucet +``` + +**NOTE**: The `--features pow-faucet` option in the above is required if we want to use the command `btcli wallet faucet` [See the below Mint tokens step](#8-mint-tokens-from-faucet). + +Next, run the localnet script and turn off the attempt to build the binary (as we have already done this above): + +```bash +BUILD_BINARY=0 ./scripts/localnet.sh +``` + +**NOTE**: Watch for any build or initialization outputs in this step. If you are building the project for the first time, this step will take a while to finish building, depending on your hardware. + +## 6. 
Install subnet template + +`cd` to your project directory and clone the bittensor subnet template repository: + +```bash +git clone https://github.com/opentensor/bittensor-subnet-template.git +``` + +Navigate to the cloned repository: + +```bash +cd bittensor-subnet-template +``` + +Install the bittensor-subnet-template Python package: + +```bash +python -m pip install -e . +``` + +## 7. Set up wallets + +You will need wallets for the different roles, i.e., subnet owner, subnet validator and subnet miner, in the subnet. + +- The owner wallet creates and controls the subnet. +- The validator and miner will be registered to the subnet created by the owner. This ensures that the validator and miner can run the respective validator and miner scripts. + +Create a coldkey for the owner role: + +```bash +btcli wallet new_coldkey --wallet.name owner +``` + +Set up the miner's wallets: + +```bash +btcli wallet new_coldkey --wallet.name miner +``` + +```bash +btcli wallet new_hotkey --wallet.name miner --wallet.hotkey default +``` + +Set up the validator's wallets: + +```bash +btcli wallet new_coldkey --wallet.name validator +``` +```bash +btcli wallet new_hotkey --wallet.name validator --wallet.hotkey default +``` + +## 8. Mint tokens from faucet + +You will need tokens to initialize the intentive mechanism on the chain as well as for registering the subnet. + +Run the following commands to mint faucet tokens for the owner and for the validator. + +Mint faucet tokens for the owner: + +```bash +btcli wallet faucet --wallet.name owner --subtensor.chain_endpoint ws://127.0.0.1:9946 +``` + +You will see: + +```bash +>> Balance: τ0.000000000 ➡ τ100.000000000 +``` + +Mint tokens for the validator: + +```bash +btcli wallet faucet --wallet.name validator --subtensor.chain_endpoint ws://127.0.0.1:9946 +``` + +You will see: + +```bash +>> Balance: τ0.000000000 ➡ τ100.000000000 +``` + +## 9. Create a subnet + +The below commands establish a new subnet on the local chain. 
The cost will be exactly τ1000.000000000 for the first subnet you create and you'll have to run the faucet several times to get enough tokens. + +```bash +btcli subnet create --wallet.name owner --subtensor.chain_endpoint ws://127.0.0.1:9946 +``` + +You will see: + +```bash +>> Your balance is: τ200.000000000 +>> Do you want to register a subnet for τ1000.000000000? [y/n]: +>> Enter password to unlock key: [YOUR_PASSWORD] +>> ✅ Registered subnetwork with netuid: 1 +``` + +**NOTE**: The local chain will now have a default `netuid` of 1. The second registration will create a `netuid` 2 and so on, until you reach the subnet limit of 8. If you register more than 8 subnets, then a subnet with the least staked TAO will be replaced by the 9th subnet you register. + +## 10. Register keys + +Register your subnet validator and subnet miner on the subnet. This gives your two keys unique slots on the subnet. The subnet has a current limit of 128 slots. + +Register the subnet miner: + +```bash +btcli subnet register --wallet.name miner --wallet.hotkey default --subtensor.chain_endpoint ws://127.0.0.1:9946 +``` + +Follow the below prompts: + +```bash +>> Enter netuid [1] (1): 1 +>> Continue Registration? [y/n]: y +>> ✅ Registered +``` + +Register the subnet validator: + +```bash + +btcli subnet register --wallet.name validator --wallet.hotkey default --subtensor.chain_endpoint ws://127.0.0.1:9946 +``` + +Follow the below prompts: + +``` +>> Enter netuid [1] (1): 1 +>> Continue Registration? [y/n]: y +>> ✅ Registered +``` + +## 11. Add stake + +This step bootstraps the incentives on your new subnet by adding stake into its incentive mechanism. + +```bash +btcli stake add --wallet.name validator --wallet.hotkey default --subtensor.chain_endpoint ws://127.0.0.1:9946 +``` + +Follow the below prompts: + +```bash +>> Stake all Tao from account: 'validator'? [y/n]: y +>> Stake: + τ0.000000000 ➡ τ100.000000000 +``` + +## 12. 
Validate key registrations + +Verify that both the miner and validator keys are successfully registered: + +```bash +btcli subnet list --subtensor.chain_endpoint ws://127.0.0.1:9946 +``` + +You will see the `2` entry under `NEURONS` column for the `NETUID` of 1, indicating that you have registered a validator and a miner in this subnet: + +```bash +NETUID NEURONS MAX_N DIFFICULTY TEMPO CON_REQ EMISSION BURN(τ) + 1 2 256.00 10.00 M 1000 None 0.00% τ1.00000 + 2 128 +``` + +See the subnet validator's registered details: + +```bash +btcli wallet overview --wallet.name validator --subtensor.chain_endpoint ws://127.0.0.1:9946 +``` + +You will see: + +``` +Subnet: 1 +COLDKEY HOTKEY UID ACTIVE STAKE(τ) RANK TRUST CONSENSUS INCENTIVE DIVIDENDS EMISSION(ρ) VTRUST VPERMIT UPDATED AXON HOTKEY_SS58 +miner default 0 True 100.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0 0.00000 14 none 5GTFrsEQfvTsh3WjiEVFeKzFTc2xcf… +1 1 2 τ100.00000 0.00000 0.00000 0.00000 0.00000 0.00000 ρ0 0.00000 + Wallet balance: τ0.0 +``` + +See the subnet miner's registered details: + +```bash +btcli wallet overview --wallet.name miner --subtensor.chain_endpoint ws://127.0.0.1:9946 +``` + +You will see: + +```bash +Subnet: 1 +COLDKEY HOTKEY UID ACTIVE STAKE(τ) RANK TRUST CONSENSUS INCENTIVE DIVIDENDS EMISSION(ρ) VTRUST VPERMIT UPDATED AXON HOTKEY_SS58 +miner default 1 True 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0 0.00000 14 none 5GTFrsEQfvTsh3WjiEVFeKzFTc2xcf… +1 1 2 τ0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 ρ0 0.00000 + Wallet balance: τ0.0 + +``` + +## 13. Run subnet miner and subnet validator + +Run the subnet miner and subnet validator. Make sure to specify your subnet parameters. 
+ +Run the subnet miner: + +```bash +python neurons/miner.py --netuid 1 --subtensor.chain_endpoint ws://127.0.0.1:9946 --wallet.name miner --wallet.hotkey default --logging.debug +``` + +Run the subnet validator: + +```bash +python neurons/validator.py --netuid 1 --subtensor.chain_endpoint ws://127.0.0.1:9946 --wallet.name validator --wallet.hotkey default --logging.debug +``` + +## 14. Set weights for your subnet + +Register a validator on the root subnet and boost to set weights for your subnet. This is a necessary step to ensure that the subnet is able to receive emmissions. + +### Register your validator on the root subnet + +```bash +btcli root register --wallet.name validator --wallet.hotkey default --subtensor.chain_endpoint ws://127.0.0.1:9946 +``` + +### Boost your subnet on the root subnet +```bash +btcli root boost --netuid 1 --increase 1 --wallet.name validator --wallet.hotkey default --subtensor.chain_endpoint ws://127.0.0.1:9946 +``` + +## 15. Verify your incentive mechanism + +After a few blocks the subnet validator will set weights. This indicates that the incentive mechanism is active. Then after a subnet tempo elapses (360 blocks or 72 minutes) you will see your incentive mechanism beginning to distribute TAO to the subnet miner. + +```bash +btcli wallet overview --wallet.name miner --subtensor.chain_endpoint ws://127.0.0.1:9946 +``` + +## Ending your session + +To halt your nodes: +```bash +# Press CTRL + C keys in the terminal. +``` + +--- diff --git a/docs/running_on_testnet.md b/docs/running_on_testnet.md new file mode 100644 index 0000000..df3b01c --- /dev/null +++ b/docs/running_on_testnet.md @@ -0,0 +1,242 @@ +# Running Subnet on Testnet + +This tutorial shows how to use the Bittensor testnet to create a subnet and run your incentive mechanism on it. + +**IMPORTANT:** We strongly recommend that you first run [Running Subnet Locally](running_on_staging.md) before running on the testnet. 
Incentive mechanisms running on the testnet are open to anyone, and although these mechanisms on testnet do not emit real TAO, they cost you test TAO which you must create. + +**DANGER** +- Do not expose your private keys. +- Only use your testnet wallet. +- Do not reuse the password of your mainnet wallet. +- Make sure your incentive mechanism is resistant to abuse. + +## Prerequisites + +Before proceeding further, make sure that you have installed Bittensor. See the below instructions: + +- [Install `bittensor`](https://github.com/opentensor/bittensor#install). + +After installing `bittensor`, proceed as below: + +## 1. Install Bittensor subnet template + +**NOTE: Skip this step if** you already did this during local testing and development. + +`cd` into your project directory and clone the bittensor-subnet-template repo: + +```bash +git clone https://github.com/opentensor/bittensor-subnet-template.git +``` + +Next, `cd` into bittensor-subnet-template repo directory: + +```bash +cd bittensor-subnet-template # Enter the +``` + +Install the bittensor-subnet-template package: + +```bash +python -m pip install -e . +``` + +## 2. Create wallets + +Create wallets for subnet owner, subnet validator and for subnet miner. + +This step creates local coldkey and hotkey pairs for your three identities: subnet owner, subnet validator and subnet miner. + +The owner will create and control the subnet. The owner must have at least 100 testnet TAO before the owner can run next steps. + +The validator and miner will be registered to the subnet created by the owner. This ensures that the validator and miner can run the respective validator and miner scripts. 
+ +Create a coldkey for your owner wallet: + +```bash +btcli wallet new_coldkey --wallet.name owner +``` + +Create a coldkey and hotkey for your miner wallet: + +```bash +btcli wallet new_coldkey --wallet.name miner +``` + +and + +```bash +btcli wallet new_hotkey --wallet.name miner --wallet.hotkey default +``` + +Create a coldkey and hotkey for your validator wallet: + +```bash +btcli wallet new_coldkey --wallet.name validator +``` + +and + +```bash +btcli wallet new_hotkey --wallet.name validator --wallet.hotkey default +``` + +## 3. Get the price of subnet creation + +Creating subnets on the testnet is competitive. The cost is determined by the rate at which new subnets are being registered onto the chain. + +By default you must have at least 100 testnet TAO in your owner wallet to create a subnet. However, the exact amount will fluctuate based on demand. The below command shows how to get the current price of creating a subnet. + +```bash +btcli subnet lock_cost --subtensor.network test +``` + +The above command will show: + +```bash +>> Subnet lock cost: τ100.000000000 +``` + +## 4. (Optional) Get faucet tokens + +Faucet is disabled on the testnet. Hence, if you don't have sufficient faucet tokens, ask the [Bittensor Discord community](https://discord.com/channels/799672011265015819/830068283314929684) for faucet tokens. + +## 5. Purchase a slot + +Using the test TAO from the previous step you can register your subnet on the testnet. This will create a new subnet on the testnet and give you the owner permissions to it. + +The below command shows how to purchase a slot. + +**NOTE**: Slots cost TAO to lock. You will get this TAO back when the subnet is deregistered. + +```bash +btcli subnet create --subtensor.network test +``` + +Enter the owner wallet name which gives permissions to the coldkey: + +```bash +>> Enter wallet name (default): owner # Enter your owner wallet name +>> Enter password to unlock key: # Enter your wallet password. +>> Register subnet? 
[y/n]: # Select yes (y) +>> ⠇ 📡 Registering subnet... +✅ Registered subnetwork with netuid: 1 # Your subnet netuid will show here, save this for later. +``` + +## 6. Register keys + +This step registers your subnet validator and subnet miner keys to the subnet, giving them the **first two slots** on the subnet. + +Register your miner key to the subnet: + +```bash +btcli subnet recycle_register --netuid 13 --subtensor.network test --wallet.name miner --wallet.hotkey default +``` + +Follow the below prompts: + +```bash +>> Enter netuid [1] (1): # Enter netuid 1 to specify the subnet you just created. +>> Continue Registration? + hotkey: ... + coldkey: ... + network: finney [y/n]: # Select yes (y) +>> ✅ Registered +``` + +Next, register your validator key to the subnet: + +```bash +btcli subnet recycle_register --netuid 13 --subtensor.network test --wallet.name validator --wallet.hotkey default +``` + +Follow the prompts: + +```bash +>> Enter netuid [1] (1): # Enter netuid 1 to specify the subnet you just created. +>> Continue Registration? + hotkey: ... + coldkey: ... + network: finney [y/n]: # Select yes (y) +>> ✅ Registered +``` + +## 7. Check that your keys have been registered + +This step returns information about your registered keys. 
+ +Check that your validator key has been registered: + +```bash +btcli wallet overview --wallet.name validator --subtensor.network test +``` + +The above command will display the below: + +```bash +Subnet: 1 +COLDKEY HOTKEY UID ACTIVE STAKE(τ) RANK TRUST CONSENSUS INCENTIVE DIVIDENDS EMISSION(ρ) VTRUST VPERMIT UPDATED AXON HOTKEY_SS58 +miner default 0 True 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0 0.00000 14 none 5GTFrsEQfvTsh3WjiEVFeKzFTc2xcf… +1 1 2 τ0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 ρ0 0.00000 + Wallet balance: τ0.0 +``` + +Check that your miner has been registered: + +```bash +btcli wallet overview --wallet.name miner --subtensor.network test +``` + +The above command will display the below: + +```bash +Subnet: 1 +COLDKEY HOTKEY UID ACTIVE STAKE(τ) RANK TRUST CONSENSUS INCENTIVE DIVIDENDS EMISSION(ρ) VTRUST VPERMIT UPDATED AXON HOTKEY_SS58 +miner default 1 True 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0 0.00000 14 none 5GTFrsEQfvTsh3WjiEVFeKzFTc2xcf… +1 1 2 τ0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 ρ0 0.00000 + Wallet balance: τ0.0 +``` + +## 8. Run subnet miner and subnet validator + +Run the subnet miner: + +```bash +python neurons/miner.py --netuid 1 --subtensor.network test --wallet.name miner --wallet.hotkey default --logging.debug +``` + +You will see the below terminal output: + +```bash +>> 2023-08-08 16:58:11.223 | INFO | Running miner for subnet: 1 on network: ws://127.0.0.1:9946 with config: ... +``` + +Next, run the subnet validator: + +```bash +python neurons/validator.py --netuid 1 --subtensor.network test --wallet.name validator --wallet.hotkey default --logging.debug +``` + +You will see the below terminal output: + +```bash +>> 2023-08-08 16:58:11.223 | INFO | Running validator for subnet: 1 on network: ws://127.0.0.1:9946 with config: ... +``` + + +## 9. 
Get emissions flowing + +Register to the root network using the `btcli`: + +```bash +btcli root register --subtensor.network test +``` + +Then set your weights for the subnet: + +```bash +btcli root weights --subtensor.network test +``` + +## 10. Stopping your nodes + +To stop your nodes, press CTRL + C in the terminal where the nodes are running. diff --git a/docs/stream_tutorial/README.md b/docs/stream_tutorial/README.md new file mode 100644 index 0000000..f213fd3 --- /dev/null +++ b/docs/stream_tutorial/README.md @@ -0,0 +1,490 @@ +# Bittensor Streaming Tutorial +This document is intented as a developer-friendly walkthrough of integrating streaming into your bittensor application. + +If you prefer to jump right into a complete stand-alone example, see: +- `miner.py` +- `protocol.py` +- `client.py` + +Start your miner: +```bash +python miner.py --netuid 8 --wallet.name default --wallet.hotkey miner --subtensor.network test --axon.port 10000 --logging.trace +``` + +Run the client: +```bash +python client.py --netuid 8 --my_uid 1 --network test +``` + +## Overview +This tutorial is designed to show you how to use the streaming API to integrate into your application. It will cover the following topics: +- writing your streaming protocol (inherits from bittensor.StreamingSynapse) +- writing your streaming server (uses your streaming protocol) +- writing your streaming client (uses your streaming protocol) + +### Defining your streaming protocol +When designing your protocol, it would be helpful to look at the bittensor.StreamingSynapse for reference. Below is a condensed snippet of the abstract methods that you will need to implement in your subclass. + +You will need to implement two methods: + +- `process_streaming_response` +- `extract_response_json` + +These two methods are the core of your streaming protocol. The first method process_streaming_response is called as the response is being streamed from the network. 
It is responsible for handling the streaming response, such as parsing and accumulating data. The second method extract_response_json is called after the response has been processed and is responsible for retrieving structured data to be post-processed in the dendrite in bittensor core code. + +```python +class StreamingSynapse(bittensor.Synapse, ABC): + ... + class BTStreamingResponse(_StreamingResponse): + ... + @abstractmethod + async def process_streaming_response(self, response: Response): + """ + Abstract method that must be implemented by the subclass. + This method should provide logic to handle the streaming response, such as parsing and accumulating data. + It is called as the response is being streamed from the network, and should be implemented to handle the specific + streaming data format and requirements of the subclass. + + Args: + response: The response object to be processed, typically containing chunks of data. + """ + ... + + @abstractmethod + def extract_response_json(self, response: Response) -> dict: + """ + Abstract method that must be implemented by the subclass. + This method should provide logic to extract JSON data from the response, including headers and content. + It is called after the response has been processed and is responsible for retrieving structured data + that can be used by the application. + + Args: + response: The response object from which to extract JSON data. + """ + ... + ... +``` + +See the full reference code at the bittensor [repo](https://github.com/opentensor/bittensor/blob/master/bittensor/stream.py). + + +#### Create your protocol +Let's walk through how to create a protocol using the bittensor.StreamingSynapse class. +```python +class MyStreamingSynapse(bt.StreamingSynapse): + # define your expected data fields here as pydantic field objects + # This allows you to control what information is passed along the network + messages: List[str] = pydantic.Field( + ..., # this ellipsis (...) 
indicates the object is required + title="Messages", # What is the name of this field? + description="A list of messages in the Prompting scenario. Immutable.", + allow_mutation=False, # disallow modification of this field after creation + ) + completion: str = pydantic.Field( + "", + title="Completion", + ) + # add fields as necessary + ... + + # This method controls how your synapse is deserialized from the network + # E.g. you can extract whatever information you want to receive at the final + # yield in the async generator returned by the server, without receiving + # the entire synapse object itself. + # In this example, we just want the completion string at the end. + def deserialize(self) -> str: + return self.completion + + # implement your `process_streaming_response` logic to actually yield objects to the streamer + # this effectively defines the async generator that you'll recieve on the client side + async def process_streaming_response(self, response: MyStreamingSynapse): + # this is an example of how you might process a streaming response + # iterate over the response content and yield each line + async for chunk in response.content.iter_any(): + tokens = chunk.decode("utf-8").split("\n") + yield tokens + + # implement `extract_response_json` to extract the JSON data from the response headers + # this will be dependent on the data you are streaming and how you want to structure it + # it MUST conform to the following format expected by the bittensor dendrite: + """ + { + # METADATA AND HEADERS + "name": ..., + "timeout": float(...), + "total_size": int(...), + "header_size": int(...), + "dendrite": ..., + "axon": ..., + # YOUR FIELDS + "messages": self.messages, + ... 
+ } + """ + def extract_response_json(self, response: MyStreamingSynapse) -> dict: + # iterate over the response headers and extract the necessary data + headers = { + k.decode("utf-8"): v.decode("utf-8") + for k, v in response.__dict__["_raw_headers"] + } + # helper function to extract data from headers + def extract_info(prefix): + return { + key.split("_")[-1]: value + for key, value in headers.items() + if key.startswith(prefix) + } + # return the extracted data in the expected format + return { + "name": headers.get("name", ""), + "timeout": float(headers.get("timeout", 0)), + "total_size": int(headers.get("total_size", 0)), + "header_size": int(headers.get("header_size", 0)), + "dendrite": extract_info("bt_header_dendrite"), # dendrite info + "axon": extract_info("bt_header_axon"), # axon info + "messages": self.messages, # field object + } +``` + +[Here](https://github.com/opentensor/text-prompting/blob/main/prompting/protocol.py#L131) is a full example implementation of a streaming protocol based on the text-prompting network. + +Please read the docstrings provided, they can be very helpful! + +### Writing the server +Great! Now we have our protocol defined, let's see how to define our server. +This will generate the tokens to be streamed in this prompting example. + +For brevity we will not be building a full miner, but inspecting the central components. +```python +class MyStreamPromptingMiner(bt.Miner): + ... # any relevant methods you'd need for your miner + + # define your server forward here + # NOTE: It is crucial that your typehints are correct and reflect your streaming protocol object + # otherwise the axon will reject adding your route to the server. + def forward(self, synapse: MyStreamingSynapse) -> MyStreamingSynapse: + # Let's use a GPT2 tokenizer for this toy example + tokenizer = GPT2Tokenizer.from_pretrained("gpt2") + + # Simulated function to decode token IDs into strings. 
In a real-world scenario, + # this can be replaced with an actual model inference step. + def model(ids): + return (tokenizer.decode(id) for id in ids) + + # This function is called asynchronously to process the input text and send back tokens + # as a streaming response. It essentially produces the async generator that will be + # consumed by the client with an `async for` loop. + async def _forward(text: str, send: Send): + # `text` may be the input prompt to your model in a real-world scenario. + # let's tokenize them into IDs for the sake of this example. + input_ids = tokenizer(text, return_tensors="pt").input_ids.squeeze() + + # You may want to buffer your tokens before sending them back to the client. + # this can be useful so we aren't flooding the client with individual tokens + # and allows you more fine-grained control over how much data is sent back + # with each yield. + N = 3 # Number of tokens to send back to the client at a time + buffer = [] + # Iterate over the tokens and send the generationed tokens back to the client + # when we have sufficient (N) tokens in the buffer. + for token in model(input_ids): + buffer.append(token) # Add token to buffer + + # If buffer has N tokens, send them back to the client. + if len(buffer) == N: + joined_buffer = "".join(buffer) + # Send the tokens back to the client + # This is the core of the streaming response and the format + # is important. The `send` function is provided by the ASGI server + # and is responsible for sending the response back to the client. + # This buffer will be received by the client as a single chunk of + # data, which can then be split into individual tokens! 
+ await send( + { + "type": "http.response.body", + "body": joined_buffer.encode("utf-8"), + "more_body": True, + } + ) + buffer = [] # Clear the buffer for next batch of tokens + + # Create a streaming response object using the `_forward` function + # It is useful to wrap your _forward function in a partial function + # to pass in the text argument lazily. + token_streamer = partial(_forward, synapse.messages[0]) + # Return the streaming response object, which is an instance of the + # `BTStreamingResponse` class. + return synapse.create_streaming_response(token_streamer) +``` + +#### Complete Example +Here is a full example for reference: +> This inherits from the prompting (text-prompting) miner base class. +> Take a look at the `prompting/baseminer/miner.py` file [here](https://github.com/opentensor/text-prompting/blob/main/prompting/baseminer/miner.py) for more details. + +```python +class StreamingTemplateMiner(prompting.Miner): + def config(self) -> "bt.Config": + """ + Returns the configuration object specific to this miner. + + Implement and extend this method to provide custom configurations for the miner. + Currently, it sets up a basic configuration parser. + + Returns: + bt.Config: A configuration object with the miner's operational parameters. + """ + parser = argparse.ArgumentParser(description="Streaming Miner Configs") + self.add_args(parser) + return bt.config(parser) + + def add_args(cls, parser: argparse.ArgumentParser): + """ + Adds custom arguments to the command line parser. + + Developers can introduce additional command-line arguments specific to the miner's + functionality in this method. These arguments can then be used to configure the miner's operation. + + Args: + parser (argparse.ArgumentParser): + The command line argument parser to which custom arguments should be added. + """ + pass + + def prompt(self, synapse: StreamPrompting) -> StreamPrompting: + """ + Generates a streaming response for the provided synapse. 
+ + This function serves as the main entry point for handling streaming prompts. It takes + the incoming synapse which contains messages to be processed and returns a streaming + response. The function uses the GPT-2 tokenizer and a simulated model to tokenize and decode + the incoming message, and then sends the response back to the client token by token. + + Args: + synapse (StreamPrompting): The incoming StreamPrompting instance containing the messages to be processed. + + Returns: + StreamPrompting: The streaming response object which can be used by other functions to + stream back the response to the client. + + Usage: + This function can be extended and customized based on specific requirements of the + miner. Developers can swap out the tokenizer, model, or adjust how streaming responses + are generated to suit their specific applications. + """ + bt.logging.trace("In outer PROMPT()") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2") + + # Simulated function to decode token IDs into strings. In a real-world scenario, + # this can be replaced with an actual model inference step. + def model(ids): + return (tokenizer.decode(id) for id in ids) + + async def _prompt(text: str, send: Send): + """ + Asynchronously processes the input text and sends back tokens as a streaming response. + + This function takes an input text, tokenizes it using the GPT-2 tokenizer, and then + uses the simulated model to decode token IDs into strings. It then sends each token + back to the client as a streaming response, with a delay between tokens to simulate + the effect of real-time streaming. + + Args: + text (str): The input text message to be processed. + send (Send): An asynchronous function that allows sending back the streaming response. + + Usage: + This function can be adjusted based on the streaming requirements, speed of + response, or the model being used. 
Developers can also introduce more sophisticated + processing steps or modify how tokens are sent back to the client. + """ + bt.logging.trace("In inner _PROMPT()") + input_ids = tokenizer(text, return_tensors="pt").input_ids.squeeze() + buffer = [] + bt.logging.debug(f"Input text: {text}") + bt.logging.debug(f"Input ids: {input_ids}") + + N = 3 # Number of tokens to send back to the client at a time + for token in model(input_ids): + bt.logging.trace(f"appending token: {token}") + buffer.append(token) + # If buffer has N tokens, send them back to the client. + if len(buffer) == N: + time.sleep(0.1) + joined_buffer = "".join(buffer) + bt.logging.debug(f"sedning tokens: {joined_buffer}") + await send( + { + "type": "http.response.body", + "body": joined_buffer.encode("utf-8"), + "more_body": True, + } + ) + bt.logging.debug(f"Streamed tokens: {joined_buffer}") + buffer = [] # Clear the buffer for next batch of tokens + + # Send any remaining tokens in the buffer + if buffer: + joined_buffer = "".join(buffer) + await send( + { + "type": "http.response.body", + "body": joined_buffer.encode("utf-8"), + "more_body": False, # No more tokens to send + } + ) + bt.logging.trace(f"Streamed tokens: {joined_buffer}") + + message = synapse.messages[0] + bt.logging.trace(f"message in _prompt: {message}") + token_streamer = partial(_prompt, message) + bt.logging.trace(f"token streamer: {token_streamer}") + return synapse.create_streaming_response(token_streamer) +``` + +### Writing the client +Excellent! Now we have defined our server, now we can define our client. + +This has assumed you have: +1. Registered your miner on the chain (`finney`/`test`) +2. Are serving your miner on an open port (e.g. `12345`) + +Steps: +- Instantiate your synapse subclass with the relevant information. E.g. `messages`, `roles`, etc. 
+- Instantiate your wallet and a dendrite client +- Query the dendrite client with your synapse object +- Iterate over the async generator to extract the yielded tokens on the server side + +```python + +# Import bittensor +import bittensor as bt + +# Create your streaming synapse subclass object to house the request body +syn = MyStreamingSynapse( + roles=["user"], + messages=["hello this is a test of a streaming response. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."] +) + +# Create a wallet instance that must be registered on the network +wallet = bt.wallet(name="default", hotkey="default") + +# Instantiate the metagraph +metagraph = bt.metagraph( + netuid=8, network="test", sync=True, lite=False +) + +# Grab the axon you're serving +my_uid = 1 +axon = metagraph.axons[my_uid] + +# Create a Dendrite instance to handle client-side communication. +dendrite = bt.dendrite(wallet=wallet) + + +This is an async function so we can use the `await` keyword when querying the server with the dendrite object. +async def main(): + # Send a request to the Axon using the Dendrite, passing in a StreamPrompting + # instance with roles and messages. The response is awaited, as the Dendrite + # communicates asynchronously with the Axon. Returns a list of async generator. 
+ responses = await dendrite( + [axon], + syn, + deserialize=False, + streaming=True + ) + + # Now that we have our responses we want to iterate over the yielded tokens + # iterate over the async generator to extract the yielded tokens on server side + for resp in responses: + i=0 + async for chunk in resp: + i += 1 + if i % 5 == 0: + print() + if isinstance(chunk, list): + print(chunk[0], end="", flush=True) + else: + # last object yielded is the synapse itself with completion filled + synapse = chunk + break + + # The synapse object contains the completion attribute which contains the + # accumulated tokens from the streaming response. + +if __name__ == "__main__": + # Run the main function with asyncio + asyncio.run(main()) + +``` +There you have it! + +### Complete example +If you would like to see a complete standalone example that only depends on bittensor>=6.2.0, look below: + +- client.py +- streaming_miner.py +- + +# client.py +```python +# Import bittensor and the text-prompting packages +import bittensor as bt +import prompting + +# Create a StreamPrompting synapse object to house the request body +syn = prompting.protocol.StreamPrompting( + roles=["user"], + messages=["hello this is a test of a streaming response. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."]) +syn + +# create a wallet instance that must be registered on the network +wallet = bt.wallet(name="default", hotkey="default") +wallet + +# instantiate the metagraph +metagraph = bt.metagraph( + netuid=8, network="test", sync=True, lite=False +) +metagraph + +# Grab the axon you're serving +axon = metagraph.axons[62] +axon + +# Create a Dendrite instance to handle client-side communication. +d = bt.dendrite(wallet=wallet) +d + + +async def main(): + + # Send a request to the Axon using the Dendrite, passing in a StreamPrompting + # instance with roles and messages. The response is awaited, as the Dendrite + # communicates asynchronously with the Axon. Returns a list of async generator. + responses = await d( + [axon], + syn, + deserialize=False, + streaming=True + ) + responses + + # iterate over the async generator to extract the yielded tokens on server side + for resp in responses: + i=0 + async for chunk in resp: + i += 1 + if i % 5 == 0: + print() + if isinstance(chunk, list): + print(chunk[0], end="", flush=True) + else: + # last object yielded is the synapse itself with completion filled + synapse = chunk + break + +if __name__ == "__main__": + import asyncio + asyncio.run(main()) +``` diff --git a/docs/stream_tutorial/client.py b/docs/stream_tutorial/client.py new file mode 100644 index 0000000..67e6f05 --- /dev/null +++ b/docs/stream_tutorial/client.py @@ -0,0 +1,104 @@ +import argparse +import asyncio +import bittensor as bt + +from protocol import StreamPrompting + +""" +This has assumed you have: +1. Registered your miner on the chain (finney/test) +2. Are serving your miner on an open port (e.g. 12345) + +Steps: +- Instantiate your synapse subclass with the relevant information. E.g. messages, roles, etc. 
+- Instantiate your wallet and a dendrite client +- Query the dendrite client with your synapse object +- Iterate over the async generator to extract the yielded tokens on the server side +""" + + +async def query_synapse(my_uid, wallet_name, hotkey, network, netuid): + syn = StreamPrompting( + roles=["user"], + messages=[ + "hello this is a test of a streaming response. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua." + ], + ) + + # create a wallet instance with provided wallet name and hotkey + wallet = bt.wallet(name=wallet_name, hotkey=hotkey) + + # instantiate the metagraph with provided network and netuid + metagraph = bt.metagraph( + netuid=netuid, network=network, sync=True, lite=False + ) + + # Grab the axon you're serving + axon = metagraph.axons[my_uid] + + # Create a Dendrite instance to handle client-side communication. + dendrite = bt.dendrite(wallet=wallet) + + async def main(): + responses = await dendrite( + [axon], syn, deserialize=False, streaming=True + ) + + for resp in responses: + i = 0 + async for chunk in resp: + i += 1 + if i % 5 == 0: + print() + if isinstance(chunk, list): + print(chunk[0], end="", flush=True) + else: + # last object yielded is the synapse itself with completion filled + synapse = chunk + break + + # Run the main function with asyncio + await main() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Query a Bittensor synapse with given parameters." 
+ ) + + # Adding arguments + parser.add_argument( + "--my_uid", + type=int, + required=True, + help="Your unique miner ID on the chain", + ) + parser.add_argument( + "--netuid", type=int, required=True, help="Network Unique ID" + ) + parser.add_argument( + "--wallet_name", type=str, default="default", help="Name of the wallet" + ) + parser.add_argument( + "--hotkey", type=str, default="default", help="Hotkey for the wallet" + ) + parser.add_argument( + "--network", + type=str, + default="test", + help='Network type, e.g., "test" or "mainnet"', + ) + + # Parse arguments + args = parser.parse_args() + + # Running the async function with provided arguments + asyncio.run( + query_synapse( + args.my_uid, + args.wallet_name, + args.hotkey, + args.network, + args.netuid, + ) + ) diff --git a/docs/stream_tutorial/config.py b/docs/stream_tutorial/config.py new file mode 100644 index 0000000..f40a547 --- /dev/null +++ b/docs/stream_tutorial/config.py @@ -0,0 +1,122 @@ +import bittensor as bt +import argparse +import os + + +def check_config(cls, config: "bt.Config"): + bt.axon.check_config(config) + bt.logging.check_config(config) + full_path = os.path.expanduser( + "{}/{}/{}/{}".format( + config.logging.logging_dir, + config.wallet.get("name", bt.defaults.wallet.name), + config.wallet.get("hotkey", bt.defaults.wallet.hotkey), + config.miner.name, + ) + ) + config.miner.full_path = os.path.expanduser(full_path) + if not os.path.exists(config.miner.full_path): + os.makedirs(config.miner.full_path) + + +def get_config() -> "bt.Config": + parser = argparse.ArgumentParser() + parser.add_argument( + "--axon.port", type=int, default=8098, help="Port to run the axon on." 
+ ) + # Subtensor network to connect to + parser.add_argument( + "--subtensor.network", + default="finney", + help="Bittensor network to connect to.", + ) + # Chain endpoint to connect to + parser.add_argument( + "--subtensor.chain_endpoint", + default="wss://entrypoint-finney.opentensor.ai:443", + help="Chain endpoint to connect to.", + ) + # Adds override arguments for network and netuid. + parser.add_argument( + "--netuid", type=int, default=1, help="The chain subnet uid." + ) + + parser.add_argument( + "--miner.root", + type=str, + help="Trials for this miner go in miner.root / (wallet_cold - wallet_hot) / miner.name ", + default="~/.bittensor/miners/", + ) + parser.add_argument( + "--miner.name", + type=str, + help="Trials for this miner go in miner.root / (wallet_cold - wallet_hot) / miner.name ", + default="Bittensor Miner", + ) + + # Run config. + parser.add_argument( + "--miner.blocks_per_epoch", + type=str, + help="Blocks until the miner sets weights on chain", + default=100, + ) + + # Switches. + parser.add_argument( + "--miner.no_set_weights", + action="store_true", + help="If True, the miner does not set weights.", + default=False, + ) + parser.add_argument( + "--miner.no_serve", + action="store_true", + help="If True, the miner doesnt serve the axon.", + default=False, + ) + parser.add_argument( + "--miner.no_start_axon", + action="store_true", + help="If True, the miner doesnt start the axon.", + default=False, + ) + + # Mocks. + parser.add_argument( + "--miner.mock_subtensor", + action="store_true", + help="If True, the miner will allow non-registered hotkeys to mine.", + default=False, + ) + + # Adds subtensor specific arguments i.e. --subtensor.chain_endpoint ... --subtensor.network ... + bt.subtensor.add_args(parser) + + # Adds logging specific arguments i.e. --logging.debug ..., --logging.trace .. or --logging.logging_dir ... + bt.logging.add_args(parser) + + # Adds wallet specific arguments i.e. --wallet.name ..., --wallet.hotkey ./. 
or --wallet.path ... + bt.wallet.add_args(parser) + + # Adds axon specific arguments i.e. --axon.port ... + bt.axon.add_args(parser) + + # Activating the parser to read any command-line inputs. + # To print help message, run python3 template/miner.py --help + config = bt.config(parser) + + # Logging captures events for diagnosis or understanding miner's behavior. + config.full_path = os.path.expanduser( + "{}/{}/{}/netuid{}/{}".format( + config.logging.logging_dir, + config.wallet.name, + config.wallet.hotkey, + config.netuid, + "miner", + ) + ) + # Ensure the directory for logging exists, else create one. + if not os.path.exists(config.full_path): + os.makedirs(config.full_path, exist_ok=True) + return config diff --git a/docs/stream_tutorial/miner.py b/docs/stream_tutorial/miner.py new file mode 100644 index 0000000..df663e1 --- /dev/null +++ b/docs/stream_tutorial/miner.py @@ -0,0 +1,402 @@ +import copy +import time +import asyncio +import argparse +import threading +import traceback +from abc import ABC, abstractmethod +from functools import partial +from starlette.types import Send + +import bittensor as bt +from transformers import GPT2Tokenizer +from typing import List, Dict, Tuple, Union, Callable, Awaitable + +from protocol import StreamPrompting +from config import get_config, check_config + + +class StreamMiner(ABC): + def __init__(self, config=None, axon=None, wallet=None, subtensor=None): + # Setup base config from Miner.config() and merge with subclassed config. + base_config = copy.deepcopy(config or get_config()) + self.config = self.config() + self.config.merge(base_config) + + check_config(StreamMiner, self.config) + bt.logging.info(self.config) # TODO: duplicate print? + + self.prompt_cache: Dict[str, Tuple[str, int]] = {} + + # Activating Bittensor's logging with the set configurations. 
+ bt.logging(config=self.config, logging_dir=self.config.full_path) + bt.logging.info("Setting up bittensor objects.") + + # Wallet holds cryptographic information, ensuring secure transactions and communication. + self.wallet = wallet or bt.wallet(config=self.config) + bt.logging.info(f"Wallet {self.wallet}") + + # subtensor manages the blockchain connection, facilitating interaction with the Bittensor blockchain. + self.subtensor = subtensor or bt.subtensor(config=self.config) + bt.logging.info(f"Subtensor: {self.subtensor}") + bt.logging.info( + f"Running miner for subnet: {self.config.netuid} on network: {self.subtensor.chain_endpoint} with config:" + ) + + # metagraph provides the network's current state, holding state about other participants in a subnet. + self.metagraph = self.subtensor.metagraph(self.config.netuid) + bt.logging.info(f"Metagraph: {self.metagraph}") + + if self.wallet.hotkey.ss58_address not in self.metagraph.hotkeys: + bt.logging.error( + f"\nYour validator: {self.wallet} if not registered to chain connection: {self.subtensor} \nRun btcli register and try again. " + ) + exit() + else: + # Each miner gets a unique identity (UID) in the network for differentiation. + self.my_subnet_uid = self.metagraph.hotkeys.index( + self.wallet.hotkey.ss58_address + ) + bt.logging.info(f"Running miner on uid: {self.my_subnet_uid}") + + # The axon handles request processing, allowing validators to send this process requests. + self.axon = axon or bt.axon( + wallet=self.wallet, port=self.config.axon.port + ) + # Attach determiners which functions are called when servicing a request. + bt.logging.info(f"Attaching forward function to axon.") + print(f"Attaching forward function to axon. 
{self._prompt}") + self.axon.attach( + forward_fn=self._prompt, + ) + bt.logging.info(f"Axon created: {self.axon}") + + # Instantiate runners + self.should_exit: bool = False + self.is_running: bool = False + self.thread: threading.Thread = None + self.lock = asyncio.Lock() + self.request_timestamps: Dict = {} + + @abstractmethod + def config(self) -> "bt.Config": + ... + + @classmethod + @abstractmethod + def add_args(cls, parser: argparse.ArgumentParser): + ... + + def _prompt(self, synapse: StreamPrompting) -> StreamPrompting: + """ + A wrapper method around the `prompt` method that will be defined by the subclass. + + This method acts as an intermediary layer to perform pre-processing before calling the + actual `prompt` method implemented in the subclass. Specifically, it checks whether a + prompt is in cache to avoid reprocessing recent requests. If the prompt is not in the + cache, the subclass `prompt` method is called. + + Args: + synapse (StreamPrompting): The incoming request object encapsulating the details of the request. + + Returns: + StreamPrompting: The response object to be sent back in reply to the incoming request, essentially + the filled synapse request object. + + Raises: + ValueError: If the prompt is found in the cache indicating it was sent recently. + + Example: + This method is not meant to be called directly but is invoked internally when a request + is received, and it subsequently calls the `prompt` method of the subclass. + """ + return self.prompt(synapse) + + @abstractmethod + def prompt(self, synapse: StreamPrompting) -> StreamPrompting: + """ + Abstract method to handle and respond to incoming requests to the miner. + + Subclasses should implement this method to define their custom logic for processing and + responding to requests. This method is designed to be overridden, and its behavior will + be dependent on the specific implementation provided in the subclass. 
+ + Args: + synapse (StreamPrompting): The incoming request object encapsulating the details + of the request. This must contain `messages` and `roles` as fields. + + Returns: + StreamPrompting: The response object that should be sent back in reply to the + incoming request. This is essentially the filled synapse request object. + + Example: + class CustomMiner(Miner): + def prompt(self, synapse: StreamPrompting) -> StreamPrompting: + # Custom logic to process and respond to the request. + synapse.completion = "The meaning of life is 42." + return synapse + """ + ... + + def run(self): + """ + Runs the miner logic. This method starts the miner's operations, including + listening for incoming requests and periodically updating the miner's knowledge + of the network graph. + """ + if not self.subtensor.is_hotkey_registered( + netuid=self.config.netuid, + hotkey_ss58=self.wallet.hotkey.ss58_address, + ): + bt.logging.error( + f"Wallet: {self.wallet} is not registered on netuid {self.config.netuid}" + f"Please register the hotkey using `btcli subnets register` before trying again" + ) + exit() + + # Serve passes the axon information to the network + netuid we are hosting on. + # This will auto-update if the axon port of external ip have changed. + bt.logging.info( + f"Serving axon {StreamPrompting} on network: {self.config.subtensor.chain_endpoint} with netuid: {self.config.netuid}" + ) + self.axon.serve(netuid=self.config.netuid, subtensor=self.subtensor) + + # Start starts the miner's axon, making it active on the network. + bt.logging.info( + f"Starting axon server on port: {self.config.axon.port}" + ) + self.axon.start() + + # --- Run until should_exit = True. + self.last_epoch_block = self.subtensor.get_current_block() + bt.logging.info(f"Miner starting at block: {self.last_epoch_block}") + + # This loop maintains the miner's operations until intentionally stopped. 
+ bt.logging.info(f"Starting main loop") + step = 0 + try: + while not self.should_exit: + start_epoch = time.time() + + # --- Wait until next epoch. + current_block = self.subtensor.get_current_block() + while ( + current_block - self.last_epoch_block + < self.config.miner.blocks_per_epoch + ): + # --- Wait for next bloc. + time.sleep(1) + current_block = self.subtensor.get_current_block() + + # --- Check if we should exit. + if self.should_exit: + break + + # --- Update the metagraph with the latest network state. + self.last_epoch_block = self.subtensor.get_current_block() + + metagraph = self.subtensor.metagraph( + netuid=self.config.netuid, + lite=True, + block=self.last_epoch_block, + ) + log = ( + f"Step:{step} | " + f"Block:{metagraph.block.item()} | " + f"Stake:{metagraph.S[self.my_subnet_uid]} | " + f"Rank:{metagraph.R[self.my_subnet_uid]} | " + f"Trust:{metagraph.T[self.my_subnet_uid]} | " + f"Consensus:{metagraph.C[self.my_subnet_uid] } | " + f"Incentive:{metagraph.I[self.my_subnet_uid]} | " + f"Emission:{metagraph.E[self.my_subnet_uid]}" + ) + bt.logging.info(log) + + # --- Set weights. + if not self.config.miner.no_set_weights: + pass + step += 1 + + # If someone intentionally stops the miner, it'll safely terminate operations. + except KeyboardInterrupt: + self.axon.stop() + bt.logging.success("Miner killed by keyboard interrupt.") + exit() + + # In case of unforeseen errors, the miner will log the error and continue operations. + except Exception as e: + bt.logging.error(traceback.format_exc()) + + def run_in_background_thread(self): + """ + Starts the miner's operations in a separate background thread. + This is useful for non-blocking operations. 
+ """ + if not self.is_running: + bt.logging.debug("Starting miner in background thread.") + self.should_exit = False + self.thread = threading.Thread(target=self.run, daemon=True) + self.thread.start() + self.is_running = True + bt.logging.debug("Started") + + def stop_run_thread(self): + """ + Stops the miner's operations that are running in the background thread. + """ + if self.is_running: + bt.logging.debug("Stopping miner in background thread.") + self.should_exit = True + self.thread.join(5) + self.is_running = False + bt.logging.debug("Stopped") + + def __enter__(self): + """ + Starts the miner's operations in a background thread upon entering the context. + This method facilitates the use of the miner in a 'with' statement. + """ + self.run_in_background_thread() + + def __exit__(self, exc_type, exc_value, traceback): + """ + Stops the miner's background operations upon exiting the context. + This method facilitates the use of the miner in a 'with' statement. + + Args: + exc_type: The type of the exception that caused the context to be exited. + None if the context was exited without an exception. + exc_value: The instance of the exception that caused the context to be exited. + None if the context was exited without an exception. + traceback: A traceback object encoding the stack trace. + None if the context was exited without an exception. + """ + self.stop_run_thread() + + +class StreamingTemplateMiner(StreamMiner): + def config(self) -> "bt.Config": + """ + Returns the configuration object specific to this miner. + + Implement and extend this method to provide custom configurations for the miner. + Currently, it sets up a basic configuration parser. + + Returns: + bt.Config: A configuration object with the miner's operational parameters. 
+ """ + parser = argparse.ArgumentParser(description="Streaming Miner Configs") + self.add_args(parser) + return bt.config(parser) + + def add_args(cls, parser: argparse.ArgumentParser): + """ + Adds custom arguments to the command line parser. + + Developers can introduce additional command-line arguments specific to the miner's + functionality in this method. These arguments can then be used to configure the miner's operation. + + Args: + parser (argparse.ArgumentParser): + The command line argument parser to which custom arguments should be added. + """ + pass + + def prompt(self, synapse: StreamPrompting) -> StreamPrompting: + """ + Generates a streaming response for the provided synapse. + + This function serves as the main entry point for handling streaming prompts. It takes + the incoming synapse which contains messages to be processed and returns a streaming + response. The function uses the GPT-2 tokenizer and a simulated model to tokenize and decode + the incoming message, and then sends the response back to the client token by token. + + Args: + synapse (StreamPrompting): The incoming StreamPrompting instance containing the messages to be processed. + + Returns: + StreamPrompting: The streaming response object which can be used by other functions to + stream back the response to the client. + + Usage: + This function can be extended and customized based on specific requirements of the + miner. Developers can swap out the tokenizer, model, or adjust how streaming responses + are generated to suit their specific applications. + """ + bt.logging.trace("HI. PROMPT()") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2") + + # Simulated function to decode token IDs into strings. In a real-world scenario, + # this can be replaced with an actual model inference step. 
+ def model(ids): + return (tokenizer.decode(id) for id in ids) + + async def _prompt(text: str, send: Send): + """ + Asynchronously processes the input text and sends back tokens as a streaming response. + + This function takes an input text, tokenizes it using the GPT-2 tokenizer, and then + uses the simulated model to decode token IDs into strings. It then sends each token + back to the client as a streaming response, with a delay between tokens to simulate + the effect of real-time streaming. + + Args: + text (str): The input text message to be processed. + send (Send): An asynchronous function that allows sending back the streaming response. + + Usage: + This function can be adjusted based on the streaming requirements, speed of + response, or the model being used. Developers can also introduce more sophisticated + processing steps or modify how tokens are sent back to the client. + """ + bt.logging.trace("HI. _PROMPT()") + input_ids = tokenizer( + text, return_tensors="pt" + ).input_ids.squeeze() + buffer = [] + bt.logging.debug(f"Input text: {text}") + bt.logging.debug(f"Input ids: {input_ids}") + + N = 3 # Number of tokens to send back to the client at a time + for token in model(input_ids): + bt.logging.trace(f"appending token: {token}") + buffer.append(token) + # If buffer has N tokens, send them back to the client. 
+ if len(buffer) == N: + time.sleep(0.1) + joined_buffer = "".join(buffer) + bt.logging.debug(f"sedning tokens: {joined_buffer}") + await send( + { + "type": "http.response.body", + "body": joined_buffer.encode("utf-8"), + "more_body": True, + } + ) + bt.logging.debug(f"Streamed tokens: {joined_buffer}") + buffer = [] # Clear the buffer for next batch of tokens + + # Send any remaining tokens in the buffer + if buffer: + joined_buffer = "".join(buffer) + await send( + { + "type": "http.response.body", + "body": joined_buffer.encode("utf-8"), + "more_body": False, # No more tokens to send + } + ) + bt.logging.trace(f"Streamed tokens: {joined_buffer}") + + message = synapse.messages[0] + bt.logging.trace(f"message in _prompt: {message}") + token_streamer = partial(_prompt, message) + bt.logging.trace(f"token streamer: {token_streamer}") + return synapse.create_streaming_response(token_streamer) + + +# This is the main function, which runs the miner. +if __name__ == "__main__": + with StreamingTemplateMiner(): + while True: + time.sleep(1) diff --git a/docs/stream_tutorial/protocol.py b/docs/stream_tutorial/protocol.py new file mode 100644 index 0000000..26e91fd --- /dev/null +++ b/docs/stream_tutorial/protocol.py @@ -0,0 +1,154 @@ +import pydantic +import bittensor as bt + +from abc import ABC, abstractmethod +from typing import List, Union, Callable, Awaitable +from starlette.responses import StreamingResponse + + +class StreamPrompting(bt.StreamingSynapse): + """ + StreamPrompting is a specialized implementation of the `StreamingSynapse` tailored for prompting functionalities within + the Bittensor network. This class is intended to interact with a streaming response that contains a sequence of tokens, + which represent prompts or messages in a certain scenario. + + As a developer, when using or extending the `StreamPrompting` class, you should be primarily focused on the structure + and behavior of the prompts you are working with. 
The class has been designed to seamlessly handle the streaming, + decoding, and accumulation of tokens that represent these prompts. + + Attributes: + - `roles` (List[str]): A list of roles involved in the prompting scenario. This could represent different entities + or agents involved in the conversation or use-case. They are immutable to ensure consistent + interaction throughout the lifetime of the object. + + - `messages` (List[str]): These represent the actual prompts or messages in the prompting scenario. They are also + immutable to ensure consistent behavior during processing. + + - `completion` (str): Stores the processed result of the streaming tokens. As tokens are streamed, decoded, and + processed, they are accumulated in the completion attribute. This represents the "final" + product or result of the streaming process. + - `required_hash_fields` (List[str]): A list of fields that are required for the hash. + + Methods: + - `process_streaming_response`: This method asynchronously processes the incoming streaming response by decoding + the tokens and accumulating them in the `completion` attribute. + + - `deserialize`: Converts the `completion` attribute into its desired data format, in this case, a string. + + - `extract_response_json`: Extracts relevant JSON data from the response, useful for gaining insights on the response's + metadata or for debugging purposes. + + Note: While you can directly use the `StreamPrompting` class, it's designed to be extensible. Thus, you can create + subclasses to further customize behavior for specific prompting scenarios or requirements. + """ + + roles: List[str] = pydantic.Field( + ..., + title="Roles", + description="A list of roles in the StreamPrompting scenario. Immuatable.", + allow_mutation=False, + ) + + messages: List[str] = pydantic.Field( + ..., + title="Messages", + description="A list of messages in the StreamPrompting scenario. 
Immutable.", + allow_mutation=False, + ) + + required_hash_fields: List[str] = pydantic.Field( + ["messages"], + title="Required Hash Fields", + description="A list of required fields for the hash.", + allow_mutation=False, + ) + + completion: str = pydantic.Field( + "", + title="Completion", + description="Completion status of the current StreamPrompting object. This attribute is mutable and can be updated.", + ) + + async def process_streaming_response(self, response: StreamingResponse): + """ + `process_streaming_response` is an asynchronous method designed to process the incoming streaming response from the + Bittensor network. It's the heart of the StreamPrompting class, ensuring that streaming tokens, which represent + prompts or messages, are decoded and appropriately managed. + + As the streaming response is consumed, the tokens are decoded from their 'utf-8' encoded format, split based on + newline characters, and concatenated into the `completion` attribute. This accumulation of decoded tokens in the + `completion` attribute allows for a continuous and coherent accumulation of the streaming content. + + Args: + response: The streaming response object containing the content chunks to be processed. Each chunk in this + response is expected to be a set of tokens that can be decoded and split into individual messages or prompts. + """ + if self.completion is None: + self.completion = "" + bt.logging.debug( + "Processing streaming response (StreamingSynapse base class)." + ) + async for chunk in response.content.iter_any(): + bt.logging.debug(f"Processing chunk: {chunk}") + tokens = chunk.decode("utf-8").split("\n") + for token in tokens: + bt.logging.debug(f"--processing token: {token}") + if token: + self.completion += token + bt.logging.debug(f"yielding tokens {tokens}") + yield tokens + + def deserialize(self) -> str: + """ + Deserializes the response by returning the completion attribute. + + Returns: + str: The completion result. 
+ """ + return self.completion + + def extract_response_json(self, response: StreamingResponse) -> dict: + """ + `extract_response_json` is a method that performs the crucial task of extracting pertinent JSON data from the given + response. The method is especially useful when you need a detailed insight into the streaming response's metadata + or when debugging response-related issues. + + Beyond just extracting the JSON data, the method also processes and structures the data for easier consumption + and understanding. For instance, it extracts specific headers related to dendrite and axon, offering insights + about the Bittensor network's internal processes. The method ultimately returns a dictionary with a structured + view of the extracted data. + + Args: + response: The response object from which to extract the JSON data. This object typically includes headers and + content which can be used to glean insights about the response. + + Returns: + dict: A structured dictionary containing: + - Basic response metadata such as name, timeout, total_size, and header_size. + - Dendrite and Axon related information extracted from headers. + - Roles and Messages pertaining to the current StreamPrompting instance. + - The accumulated completion. 
+ """ + headers = { + k.decode("utf-8"): v.decode("utf-8") + for k, v in response.__dict__["_raw_headers"] + } + + def extract_info(prefix): + return { + key.split("_")[-1]: value + for key, value in headers.items() + if key.startswith(prefix) + } + + return { + "name": headers.get("name", ""), + "timeout": float(headers.get("timeout", 0)), + "total_size": int(headers.get("total_size", 0)), + "header_size": int(headers.get("header_size", 0)), + "dendrite": extract_info("bt_header_dendrite"), + "axon": extract_info("bt_header_axon"), + "roles": self.roles, + "messages": self.messages, + "completion": self.completion, + } diff --git a/docs/what_are_subnets.md b/docs/what_are_subnets.md new file mode 100644 index 0000000..eee24ce --- /dev/null +++ b/docs/what_are_subnets.md @@ -0,0 +1,27 @@ +# What is Bittensor? +Bittensor is a network where computers validate the work that other computers contribute to the network - the work what is most valuable to the collective will be rewarded + +Bittensor is a catalyst to the open-source developers and smaller AI research labs now have a financial incentive for fine-tuning open foundational models + +Bittensor is a library of machine intelligence that continuously grows and shares knowledge amongst peers + +# What is a subnet? + +Bittensor is releasing its own language for creating incentive mechanisms. This allows developers to build incentive systems on Bittensor, tapping into our web of intelligence to develop markets of the developer’s choosings + +Subnet 1, an incentive system for machine intelligence production, showcases the enormous potential of markets to procure huge amounts of resources. Releasing user-created subnets is set to create a cambrian explosion of additional resources into the Bittensor ecosystem + +# Why should you care? + +As an open-source developer, you now have the ability to write your own incentive mechanisms without creating an entirely new chain. 
By tapping into Bittensor’s network of intelligence, you can incentivize AI models from all over the world to perform tasks of your choosing (i.e., image generation, storage, compute access, etc.) - the possibilities are truly endless + +The release of subnets also offers the potential to pull these tools into a shared network, making all the ingredients necessary to create intelligence available within one network, governed by one token + +You get to play a vital role in helping bootstrap what could one day become one of the most powerful networks in the world - and you make money by doing so! + +By incentivizing developers to create their own markets, Bittensor is set to become a one-stop-shop for those seeking all the compute requirements for building unstoppable applications on top of an incentivized infrastructure + +# Deeper dive +Check out the Bittensor about page [here](https://bittensor.com/about) for more details about what the bittensor paradigm is and why subnets are revolutionary technology. + +Also see our [linktree](https://linktr.ee/opentensor) for more information. \ No newline at end of file diff --git a/min_compute.yml b/min_compute.yml new file mode 100644 index 0000000..1da3bb0 --- /dev/null +++ b/min_compute.yml @@ -0,0 +1,87 @@ +# Use this document to specify the minimum compute requirements. +# This document will be used to generate a list of recommended hardware for your subnet. + +# This is intended to give a rough estimate of the minimum requirements +# so that the user can make an informed decision about whether or not +# they want to run a miner or validator on their machine. 
+ +# NOTE: Specification for miners may be different from validators + +version: '1.0' # update this version key as needed, ideally should match your release version + +compute_spec: + + miner: + + cpu: + min_cores: 4 # Minimum number of CPU cores + min_speed: 2.5 # Minimum speed per core (GHz) + recommended_cores: 8 # Recommended number of CPU cores + recommended_speed: 3.5 # Recommended speed per core (GHz) + architecture: "x86_64" # Architecture type (e.g., x86_64, arm64) + + gpu: + required: True # Does the application require a GPU? + min_vram: 8 # Minimum GPU VRAM (GB) + recommended_vram: 24 # Recommended GPU VRAM (GB) + cuda_cores: 1024 # Minimum number of CUDA cores (if applicable) + min_compute_capability: 6.0 # Minimum CUDA compute capability + recommended_compute_capability: 7.0 # Recommended CUDA compute capability + recommended_gpu: "NVIDIA A100" # provide a recommended GPU to purchase/rent + + memory: + min_ram: 16 # Minimum RAM (GB) + min_swap: 4 # Minimum swap space (GB) + recommended_swap: 8 # Recommended swap space (GB) + ram_type: "DDR4" # RAM type (e.g., DDR4, DDR3, etc.) + + storage: + min_space: 10 # Minimum free storage space (GB) + recommended_space: 100 # Recommended free storage space (GB) + type: "SSD" # Preferred storage type (e.g., SSD, HDD) + min_iops: 1000 # Minimum I/O operations per second (if applicable) + recommended_iops: 5000 # Recommended I/O operations per second + + os: + name: "Ubuntu" # Name of the preferred operating system(s) + version: 20.04 # Version of the preferred operating system(s) + + validator: + + cpu: + min_cores: 4 # Minimum number of CPU cores + min_speed: 2.5 # Minimum speed per core (GHz) + recommended_cores: 8 # Recommended number of CPU cores + recommended_speed: 3.5 # Recommended speed per core (GHz) + architecture: "x86_64" # Architecture type (e.g., x86_64, arm64) + + gpu: + required: True # Does the application require a GPU? 
+ min_vram: 8 # Minimum GPU VRAM (GB) + recommended_vram: 24 # Recommended GPU VRAM (GB) + cuda_cores: 1024 # Minimum number of CUDA cores (if applicable) + min_compute_capability: 6.0 # Minimum CUDA compute capability + recommended_compute_capability: 7.0 # Recommended CUDA compute capability + recommended_gpu: "NVIDIA A100" # provide a recommended GPU to purchase/rent + + memory: + min_ram: 16 # Minimum RAM (GB) + min_swap: 4 # Minimum swap space (GB) + recommended_swap: 8 # Recommended swap space (GB) + ram_type: "DDR4" # RAM type (e.g., DDR4, DDR3, etc.) + + storage: + min_space: 10 # Minimum free storage space (GB) + recommended_space: 100 # Recommended free storage space (GB) + type: "SSD" # Preferred storage type (e.g., SSD, HDD) + min_iops: 1000 # Minimum I/O operations per second (if applicable) + recommended_iops: 5000 # Recommended I/O operations per second + + os: + name: "Ubuntu" # Name of the preferred operating system(s) + version: 20.04 # Version of the preferred operating system(s) + +network_spec: + bandwidth: + download: 100 # Minimum download bandwidth (Mbps) + upload: 20 # Minimum upload bandwidth (Mbps) diff --git a/neurons/__init__.py b/neurons/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/neurons/miner.py b/neurons/miner.py new file mode 100644 index 0000000..d764a4e --- /dev/null +++ b/neurons/miner.py @@ -0,0 +1,160 @@ +# The MIT License (MIT) +# Copyright © 2023 Yuma Rao +# TODO(developer): Set your name +# Copyright © 2023 + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission 
notice shall be included in all copies or substantial portions of +# the Software. + +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +import time +import typing +import bittensor as bt + +# Bittensor Miner Template: +import template + +# import base miner class which takes care of most of the boilerplate +from template.base.miner import BaseMinerNeuron + + +class Miner(BaseMinerNeuron): + """ + Your miner neuron class. You should use this class to define your miner's behavior. In particular, you should replace the forward function with your own logic. You may also want to override the blacklist and priority functions according to your needs. + + This class inherits from the BaseMinerNeuron class, which in turn inherits from BaseNeuron. The BaseNeuron class takes care of routine tasks such as setting up wallet, subtensor, metagraph, logging directory, parsing config, etc. You can override any of the methods in BaseNeuron if you need to customize the behavior. + + This class provides reasonable default behavior for a miner such as blacklisting unrecognized hotkeys, prioritizing requests based on stake, and forwarding requests to the forward function. If you need to define custom + """ + + def __init__(self, config=None): + super(Miner, self).__init__(config=config) + + # TODO(developer): Anything specific to your use case you can do here + + async def forward( + self, synapse: template.protocol.Dummy + ) -> template.protocol.Dummy: + """ + Processes the incoming 'Dummy' synapse by performing a predefined operation on the input data. 
+ This method should be replaced with actual logic relevant to the miner's purpose. + + Args: + synapse (template.protocol.Dummy): The synapse object containing the 'dummy_input' data. + + Returns: + template.protocol.Dummy: The synapse object with the 'dummy_output' field set to twice the 'dummy_input' value. + + The 'forward' function is a placeholder and should be overridden with logic that is appropriate for + the miner's intended operation. This method demonstrates a basic transformation of input data. + """ + # TODO(developer): Replace with actual implementation logic. + synapse.dummy_output = synapse.dummy_input * 2 + return synapse + + async def blacklist( + self, synapse: template.protocol.Dummy + ) -> typing.Tuple[bool, str]: + """ + Determines whether an incoming request should be blacklisted and thus ignored. Your implementation should + define the logic for blacklisting requests based on your needs and desired security parameters. + + Blacklist runs before the synapse data has been deserialized (i.e. before synapse.data is available). + The synapse is instead contructed via the headers of the request. It is important to blacklist + requests before they are deserialized to avoid wasting resources on requests that will be ignored. + + Args: + synapse (template.protocol.Dummy): A synapse object constructed from the headers of the incoming request. + + Returns: + Tuple[bool, str]: A tuple containing a boolean indicating whether the synapse's hotkey is blacklisted, + and a string providing the reason for the decision. + + This function is a security measure to prevent resource wastage on undesired requests. It should be enhanced + to include checks against the metagraph for entity registration, validator status, and sufficient stake + before deserialization of synapse data to minimize processing overhead. + + Example blacklist logic: + - Reject if the hotkey is not a registered entity within the metagraph. 
+ - Consider blacklisting entities that are not validators or have insufficient stake. + + In practice it would be wise to blacklist requests from entities that are not validators, or do not have + enough stake. This can be checked via metagraph.S and metagraph.validator_permit. You can always attain + the uid of the sender via a metagraph.hotkeys.index( synapse.dendrite.hotkey ) call. + + Otherwise, allow the request to be processed further. + """ + # TODO(developer): Define how miners should blacklist requests. + uid = self.metagraph.hotkeys.index(synapse.dendrite.hotkey) + if ( + not self.config.blacklist.allow_non_registered + and synapse.dendrite.hotkey not in self.metagraph.hotkeys + ): + # Ignore requests from un-registered entities. + bt.logging.trace( + f"Blacklisting un-registered hotkey {synapse.dendrite.hotkey}" + ) + return True, "Unrecognized hotkey" + + if self.config.blacklist.force_validator_permit: + # If the config is set to force validator permit, then we should only allow requests from validators. + if not self.metagraph.validator_permit[uid]: + bt.logging.warning( + f"Blacklisting a request from non-validator hotkey {synapse.dendrite.hotkey}" + ) + return True, "Non-validator hotkey" + + bt.logging.trace( + f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" + ) + return False, "Hotkey recognized!" + + async def priority(self, synapse: template.protocol.Dummy) -> float: + """ + The priority function determines the order in which requests are handled. More valuable or higher-priority + requests are processed before others. You should design your own priority mechanism with care. + + This implementation assigns priority to incoming requests based on the calling entity's stake in the metagraph. + + Args: + synapse (template.protocol.Dummy): The synapse object that contains metadata about the incoming request. + + Returns: + float: A priority score derived from the stake of the calling entity. 
Miners may receive messages from multiple entities at once.
IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + + +import time + +# Bittensor +import bittensor as bt + +# Bittensor Validator Template: +import template +from template.validator import forward + +# import base validator class which takes care of most of the boilerplate +from template.base.validator import BaseValidatorNeuron + + +class Validator(BaseValidatorNeuron): + """ + Your validator neuron class. You should use this class to define your validator's behavior. In particular, you should replace the forward function with your own logic. + + This class inherits from the BaseValidatorNeuron class, which in turn inherits from BaseNeuron. The BaseNeuron class takes care of routine tasks such as setting up wallet, subtensor, metagraph, logging directory, parsing config, etc. You can override any of the methods in BaseNeuron if you need to customize the behavior. + + This class provides reasonable default behavior for a validator such as keeping a moving average of the scores of the miners and using them to set weights at the end of each epoch. Additionally, the scores are reset for new hotkeys at the end of each epoch. + """ + + def __init__(self, config=None): + super(Validator, self).__init__(config=config) + + bt.logging.info("load_state()") + self.load_state() + + # TODO(developer): Anything specific to your use case you can do here + + async def forward(self): + """ + Validator forward pass. Consists of: + - Generating the query + - Querying the miners + - Getting the responses + - Rewarding the miners + - Updating the scores + """ + # TODO(developer): Rewrite this function based on your protocol definition. + return await forward(self) + + +# The main function parses the configuration and runs the validator. 
+if __name__ == "__main__": + with Validator() as validator: + while True: + bt.logging.info("Validator running...", time.time()) + time.sleep(5) diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..c1b866e --- /dev/null +++ b/requirements.txt @@ -0,0 +1,2 @@ +bittensor +torch \ No newline at end of file diff --git a/scripts/check_compatibility.sh b/scripts/check_compatibility.sh new file mode 100755 index 0000000..b0bd6b4 --- /dev/null +++ b/scripts/check_compatibility.sh @@ -0,0 +1,76 @@ +#!/bin/bash + +if [ -z "$1" ]; then + echo "Please provide a Python version as an argument." + exit 1 +fi + +python_version="$1" +all_passed=true + +GREEN='\033[0;32m' +YELLOW='\033[0;33m' +RED='\033[0;31m' +NC='\033[0m' # No Color + +check_compatibility() { + all_supported=0 + + while read -r requirement; do + # Skip lines starting with git+ + if [[ "$requirement" == git+* ]]; then + continue + fi + + package_name=$(echo "$requirement" | awk -F'[!=<>]' '{print $1}' | awk -F'[' '{print $1}') # Strip off brackets + echo -n "Checking $package_name... " + + url="https://pypi.org/pypi/$package_name/json" + response=$(curl -s $url) + status_code=$(curl -s -o /dev/null -w "%{http_code}" $url) + + if [ "$status_code" != "200" ]; then + echo -e "${RED}Information not available for $package_name. 
Failure.${NC}" + all_supported=1 + continue + fi + + classifiers=$(echo "$response" | jq -r '.info.classifiers[]') + requires_python=$(echo "$response" | jq -r '.info.requires_python') + + base_version="Programming Language :: Python :: ${python_version%%.*}" + specific_version="Programming Language :: Python :: $python_version" + + if echo "$classifiers" | grep -q "$specific_version" || echo "$classifiers" | grep -q "$base_version"; then + echo -e "${GREEN}Supported${NC}" + elif [ "$requires_python" != "null" ]; then + if echo "$requires_python" | grep -Eq "==$python_version|>=$python_version|<=$python_version"; then + echo -e "${GREEN}Supported${NC}" + else + echo -e "${RED}Not compatible with Python $python_version due to constraint $requires_python.${NC}" + all_supported=1 + fi + else + echo -e "${YELLOW}Warning: Specific version not listed, assuming compatibility${NC}" + fi + done < requirements.txt + + return $all_supported +} + +echo "Checking compatibility for Python $python_version..." +check_compatibility +if [ $? -eq 0 ]; then + echo -e "${GREEN}All requirements are compatible with Python $python_version.${NC}" +else + echo -e "${RED}All requirements are NOT compatible with Python $python_version.${NC}" + all_passed=false +fi + +echo "" +if $all_passed; then + echo -e "${GREEN}All tests passed.${NC}" +else + echo -e "${RED}All tests did not pass.${NC}" + exit 1 +fi diff --git a/scripts/check_requirements_changes.sh b/scripts/check_requirements_changes.sh new file mode 100755 index 0000000..a06d050 --- /dev/null +++ b/scripts/check_requirements_changes.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# Check if requirements files have changed in the last commit +if git diff --name-only HEAD~1 | grep -E 'requirements.txt|requirements.txt'; then + echo "Requirements files have changed. Running compatibility checks..." + echo 'export REQUIREMENTS_CHANGED="true"' >> $BASH_ENV +else + echo "Requirements files have not changed. Skipping compatibility checks..." 
+ echo 'export REQUIREMENTS_CHANGED="false"' >> $BASH_ENV +fi diff --git a/scripts/install_staging.sh b/scripts/install_staging.sh new file mode 100644 index 0000000..24280ce --- /dev/null +++ b/scripts/install_staging.sh @@ -0,0 +1,145 @@ +#!/bin/bash + +# Section 1: Build/Install +# This section is for first-time setup and installations. + +install_dependencies() { + # Function to install packages on macOS + install_mac() { + which brew > /dev/null + if [ $? -ne 0 ]; then + echo "Installing Homebrew..." + /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" + fi + echo "Updating Homebrew packages..." + brew update + echo "Installing required packages..." + brew install make llvm curl libssl protobuf tmux + } + + # Function to install packages on Ubuntu/Debian + install_ubuntu() { + echo "Updating system packages..." + sudo apt update + echo "Installing required packages..." + sudo apt install --assume-yes make build-essential git clang curl libssl-dev llvm libudev-dev protobuf-compiler tmux + } + + # Detect OS and call the appropriate function + if [[ "$OSTYPE" == "darwin"* ]]; then + install_mac + elif [[ "$OSTYPE" == "linux-gnu"* ]]; then + install_ubuntu + else + echo "Unsupported operating system." + exit 1 + fi + + # Install rust and cargo + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh + + # Update your shell's source to include Cargo's path + source "$HOME/.cargo/env" +} + +# Call install_dependencies only if it's the first time running the script +if [ ! -f ".dependencies_installed" ]; then + install_dependencies + touch .dependencies_installed +fi + + +# Section 2: Test/Run +# This section is for running and testing the setup. + +# Create a coldkey for the owner role +wallet=${1:-owner} + +# Logic for setting up and running the environment +setup_environment() { + # Clone subtensor and enter the directory + if [ ! 
-d "subtensor" ]; then + git clone https://github.com/opentensor/subtensor.git + fi + cd subtensor + git pull + + # Update to the nightly version of rust + ./scripts/init.sh + + cd ../bittensor-subnet-template + + # Install the bittensor-subnet-template python package + python -m pip install -e . + + # Create and set up wallets + # This section can be skipped if wallets are already set up + if [ ! -f ".wallets_setup" ]; then + btcli wallet new_coldkey --wallet.name $wallet --no_password --no_prompt + btcli wallet new_coldkey --wallet.name miner --no_password --no_prompt + btcli wallet new_hotkey --wallet.name miner --wallet.hotkey default --no_prompt + btcli wallet new_coldkey --wallet.name validator --no_password --no_prompt + btcli wallet new_hotkey --wallet.name validator --wallet.hotkey default --no_prompt + touch .wallets_setup + fi + +} + +# Call setup_environment every time +setup_environment + +## Setup localnet +# assumes we are in the bittensor-subnet-template/ directory +# Initialize your local subtensor chain in development mode. This command will set up and run a local subtensor network. 
+cd ../subtensor + +# Start a new tmux session and create a new pane, but do not switch to it +echo "FEATURES='pow-faucet runtime-benchmarks' BT_DEFAULT_TOKEN_WALLET=$(cat ~/.bittensor/wallets/$wallet/coldkeypub.txt | grep -oP '"ss58Address": "\K[^"]+') bash scripts/localnet.sh" > setup_and_run.sh +chmod +x setup_and_run.sh +tmux new-session -d -s localnet -n 'localnet' +tmux send-keys -t localnet 'bash ../subtensor/setup_and_run.sh' C-m + +# Notify the user +echo ">> localnet.sh is running in a detached tmux session named 'localnet'" +echo ">> You can attach to this session with: tmux attach-session -t localnet" + +# Register a subnet (this needs to be run each time we start a new local chain) +btcli subnet create --wallet.name $wallet --wallet.hotkey default --subtensor.chain_endpoint ws://127.0.0.1:9946 --no_prompt + +# Transfer tokens to miner and validator coldkeys +export BT_MINER_TOKEN_WALLET=$(cat ~/.bittensor/wallets/miner/coldkeypub.txt | grep -oP '"ss58Address": "\K[^"]+') +export BT_VALIDATOR_TOKEN_WALLET=$(cat ~/.bittensor/wallets/validator/coldkeypub.txt | grep -oP '"ss58Address": "\K[^"]+') + +btcli wallet transfer --subtensor.network ws://127.0.0.1:9946 --wallet.name $wallet --dest $BT_MINER_TOKEN_WALLET --amount 1000 --no_prompt +btcli wallet transfer --subtensor.network ws://127.0.0.1:9946 --wallet.name $wallet --dest $BT_VALIDATOR_TOKEN_WALLET --amount 10000 --no_prompt + +# Register wallet hotkeys to subnet +btcli subnet register --wallet.name miner --netuid 1 --wallet.hotkey default --subtensor.chain_endpoint ws://127.0.0.1:9946 --no_prompt +btcli subnet register --wallet.name validator --netuid 1 --wallet.hotkey default --subtensor.chain_endpoint ws://127.0.0.1:9946 --no_prompt + +# Add stake to the validator +btcli stake add --wallet.name validator --wallet.hotkey default --subtensor.chain_endpoint ws://127.0.0.1:9946 --amount 10000 --no_prompt + +# Ensure both the miner and validator keys are successfully registered.
+btcli subnet list --subtensor.chain_endpoint ws://127.0.0.1:9946 +btcli wallet overview --wallet.name validator --subtensor.chain_endpoint ws://127.0.0.1:9946 --no_prompt +btcli wallet overview --wallet.name miner --subtensor.chain_endpoint ws://127.0.0.1:9946 --no_prompt + +cd ../bittensor-subnet-template + + +# Check if inside a tmux session +if [ -z "$TMUX" ]; then + # Start a new tmux session and run the miner in the first pane + tmux new-session -d -s bittensor -n 'miner' 'python neurons/miner.py --netuid 1 --subtensor.chain_endpoint ws://127.0.0.1:9946 --wallet.name miner --wallet.hotkey default --logging.debug' + + # Split the window and run the validator in the new pane + tmux split-window -h -t bittensor:miner 'python neurons/validator.py --netuid 1 --subtensor.chain_endpoint ws://127.0.0.1:9946 --wallet.name validator --wallet.hotkey default --logging.debug' + + # Attach to the new tmux session + tmux attach-session -t bittensor +else + # If already in a tmux session, create two panes in the current window + tmux split-window -h 'python neurons/miner.py --netuid 1 --subtensor.chain_endpoint ws://127.0.0.1:9946 --wallet.name miner --wallet.hotkey default --logging.debug' + tmux split-window -v -t 0 'python neurons/validator.py --netuid 1 --subtensor.chain_endpoint ws://127.0.0.1:9946 --wallet.name validator --wallet.hotkey default --logging.debug' +fi diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..f76ec9b --- /dev/null +++ b/setup.py @@ -0,0 +1,96 @@ +# The MIT License (MIT) +# Copyright © 2023 Yuma Rao +# TODO(developer): Set your name +# Copyright © 2023 + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the
Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. + +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +import re +import os +import codecs +import pathlib +from os import path +from io import open +from setuptools import setup, find_packages +from pkg_resources import parse_requirements + + +def read_requirements(path): + with open(path, "r") as f: + requirements = f.read().splitlines() + processed_requirements = [] + + for req in requirements: + # For git or other VCS links + if req.startswith("git+") or "@" in req: + pkg_name = re.search(r"(#egg=)([\w\-_]+)", req) + if pkg_name: + processed_requirements.append(pkg_name.group(2)) + else: + # You may decide to raise an exception here, + # if you want to ensure every VCS link has an #egg= at the end + continue + else: + processed_requirements.append(req) + return processed_requirements + + +requirements = read_requirements("requirements.txt") +here = path.abspath(path.dirname(__file__)) + +with open(path.join(here, "README.md"), encoding="utf-8") as f: + long_description = f.read() + +# loading version from setup.py +with codecs.open( + os.path.join(here, "template/__init__.py"), encoding="utf-8" +) as init_file: + version_match = re.search( + r"^__version__ = ['\"]([^'\"]*)['\"]", init_file.read(), re.M + ) + version_string = version_match.group(1) + +setup( + name="bittensor_subnet_template", # TODO(developer): Change this value to your module 
subnet name. + version=version_string, + description="bittensor_subnet_template", # TODO(developer): Change this value to your module subnet description. + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/opentensor/bittensor-subnet-template", # TODO(developer): Change this url to your module subnet github url. + author="bittensor.com", # TODO(developer): Change this value to your module subnet author name. + packages=find_packages(), + include_package_data=True, + author_email="", # TODO(developer): Change this value to your module subnet author email. + license="MIT", + python_requires=">=3.8", + install_requires=requirements, + classifiers=[ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Topic :: Software Development :: Build Tools", + # Pick your license as you wish + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Mathematics", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Software Development", + "Topic :: Software Development :: Libraries", + "Topic :: Software Development :: Libraries :: Python Modules", + ], +) diff --git a/subnet_links.json b/subnet_links.json new file mode 100644 index 0000000..cb594d1 --- /dev/null +++ b/subnet_links.json @@ -0,0 +1,136 @@ +{ + "subnet_repositories": [ + { + "name": "sn0", + "url": "" + }, + { + "name": "sn1", + "url": "https://github.com/opentensor/text-prompting/" + }, + { + "name": "sn2", + "url": "https://github.com/bittranslateio/bittranslate/" + }, + { + "name": "sn3", + "url": "https://github.com/gitphantomman/scraping_subnet/" + }, + { + "name": "sn4", + "url": "https://github.com/manifold-inc/targon/" + }, + { + "name": "sn5", + "url": 
"https://github.com/unconst/ImageSubnet/" + }, + { + "name": "sn6", + "url": "" + }, + { + "name": "sn7", + "url": "https://github.com/tensorage/tensorage/" + }, + { + "name": "sn8", + "url": "https://github.com/taoshidev/time-series-prediction-subnet/" + }, + { + "name": "sn9", + "url": "https://github.com/unconst/pretrain-subnet/" + }, + { + "name": "sn10", + "url": "https://github.com/dream-well/map-reduce-subnet/" + }, + { + "name": "sn11", + "url": "https://github.com/opentensor/text-prompting/" + }, + { + "name": "sn12", + "url": "" + }, + { + "name": "sn13", + "url": "https://github.com/RusticLuftig/data-universe/" + }, + { + "name": "sn14", + "url": "https://github.com/ceterum1/llm-defender-subnet/" + }, + { + "name": "sn15", + "url": "https://github.com/blockchain-insights/blockchain-data-subnet/" + }, + { + "name": "sn16", + "url": "https://github.com/UncleTensor/AudioSubnet/" + }, + { + "name": "sn17", + "url": "https://github.com/CortexLM/flavia/" + }, + { + "name": "sn18", + "url": "https://github.com/corcel-api/cortex.t/" + }, + { + "name": "sn19", + "url": "https://github.com/namoray/vision/" + }, + { + "name": "sn20", + "url": "https://github.com/oracle-subnet/oracle-subnet/" + }, + { + "name": "sn21", + "url": "https://github.com/ifrit98/storage-subnet/" + }, + { + "name": "sn22", + "url": "https://github.com/surcyf123/smart-scrape/" + }, + { + "name": "sn23", + "url": "https://github.com/NicheTensor/NicheImage/" + }, + { + "name": "sn24", + "url": "https://github.com/eseckft/BitAds.ai/tree/main" + }, + { + "name": "sn25", + "url": "https://github.com/KMFODA/DistributedTraining/" + }, + { + "name": "sn26", + "url": "https://github.com/Supreme-Emperor-Wang/ImageAlchemy/" + }, + { + "name": "sn27", + "url": "https://github.com/neuralinternet/compute-subnet/" + }, + { + "name": "sn28", + "url": "https://github.com/zktensor/zktensor_subnet/" + }, + { + "name": "sn29", + "url": "https://github.com/404-Repo/Subnet-29/" + }, + { + "name": "sn30", + "url": 
"" + }, + { + "name": "sn31", + "url": "https://github.com/bthealthcare/healthcare-subnet" + }, + { + "name": "sn32", + "url": "https://github.com/RoyalTensor/roleplay/" + } + ] +} \ No newline at end of file diff --git a/template/__init__.py b/template/__init__.py new file mode 100644 index 0000000..b6e0653 --- /dev/null +++ b/template/__init__.py @@ -0,0 +1,40 @@ +# The MIT License (MIT) +# Copyright © 2023 Yuma Rao +# TODO(developer): Set your name +# Copyright © 2023 + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. + +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +# TODO(developer): Change this value when updating your code base. +# Define the version of the template module. +__version__ = "0.0.0" +version_split = __version__.split(".") +__spec_version__ = ( + (1000 * int(version_split[0])) + + (10 * int(version_split[1])) + + (1 * int(version_split[2])) +) + +# Import all submodules. +from . import protocol +from . import base +from . 
import validator + +import json + +SUBNET_LINKS = None +with open("subnet_links.json") as f: + links_dict = json.load(f) + SUBNET_LINKS = links_dict.get("subnet_repositories", None) diff --git a/template/base/__init__.py b/template/base/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/template/base/miner.py b/template/base/miner.py new file mode 100644 index 0000000..e906310 --- /dev/null +++ b/template/base/miner.py @@ -0,0 +1,222 @@ +# The MIT License (MIT) +# Copyright © 2023 Yuma Rao + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. + +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +import time +import torch +import asyncio +import threading +import argparse +import traceback + +import bittensor as bt + +from template.base.neuron import BaseNeuron +from template.utils.config import add_miner_args + + +class BaseMinerNeuron(BaseNeuron): + """ + Base class for Bittensor miners. 
+ """ + + @classmethod + def add_args(cls, parser: argparse.ArgumentParser): + super().add_args(parser) + add_miner_args(cls, parser) + + def __init__(self, config=None): + super().__init__(config=config) + + # Warn if allowing incoming requests from anyone. + if not self.config.blacklist.force_validator_permit: + bt.logging.warning( + "You are allowing non-validators to send requests to your miner. This is a security risk." + ) + if self.config.blacklist.allow_non_registered: + bt.logging.warning( + "You are allowing non-registered entities to send requests to your miner. This is a security risk." + ) + + # The axon handles request processing, allowing validators to send this miner requests. + self.axon = bt.axon(wallet=self.wallet, config=self.config) + + # Attach determiners which functions are called when servicing a request. + bt.logging.info(f"Attaching forward function to miner axon.") + self.axon.attach( + forward_fn=self.forward, + blacklist_fn=self.blacklist, + priority_fn=self.priority, + ) + bt.logging.info(f"Axon created: {self.axon}") + + # Instantiate runners + self.should_exit: bool = False + self.is_running: bool = False + self.thread: threading.Thread = None + self.lock = asyncio.Lock() + + def run(self): + """ + Initiates and manages the main loop for the miner on the Bittensor network. The main loop handles graceful shutdown on keyboard interrupts and logs unforeseen errors. + + This function performs the following primary tasks: + 1. Check for registration on the Bittensor network. + 2. Starts the miner's axon, making it active on the network. + 3. Periodically resynchronizes with the chain; updating the metagraph with the latest network state and setting weights. + + The miner continues its operations until `should_exit` is set to True or an external interruption occurs. + During each epoch of its operation, the miner waits for new blocks on the Bittensor network, updates its + knowledge of the network (metagraph), and sets its weights. 
This process ensures the miner remains active + and up-to-date with the network's latest state. + + Note: + - The function leverages the global configurations set during the initialization of the miner. + - The miner's axon serves as its interface to the Bittensor network, handling incoming and outgoing requests. + + Raises: + KeyboardInterrupt: If the miner is stopped by a manual interruption. + Exception: For unforeseen errors during the miner's operation, which are logged for diagnosis. + """ + + # Check that miner is registered on the network. + self.sync() + + # Serve passes the axon information to the network + netuid we are hosting on. + # This will auto-update if the axon port of external ip have changed. + bt.logging.info( + f"Serving miner axon {self.axon} on network: {self.config.subtensor.chain_endpoint} with netuid: {self.config.netuid}" + ) + self.axon.serve(netuid=self.config.netuid, subtensor=self.subtensor) + + # Start starts the miner's axon, making it active on the network. + self.axon.start() + + bt.logging.info(f"Miner starting at block: {self.block}") + + # This loop maintains the miner's operations until intentionally stopped. + try: + while not self.should_exit: + while ( + self.block - self.metagraph.last_update[self.uid] + < self.config.neuron.epoch_length + ): + # Wait before checking again. + time.sleep(1) + + # Check if we should exit. + if self.should_exit: + break + + # Sync metagraph and potentially set weights. + self.sync() + self.step += 1 + + # If someone intentionally stops the miner, it'll safely terminate operations. + except KeyboardInterrupt: + self.axon.stop() + bt.logging.success("Miner killed by keyboard interrupt.") + exit() + + # In case of unforeseen errors, the miner will log the error and continue operations. + except Exception as e: + bt.logging.error(traceback.format_exc()) + + def run_in_background_thread(self): + """ + Starts the miner's operations in a separate background thread. 
+ This is useful for non-blocking operations. + """ + if not self.is_running: + bt.logging.debug("Starting miner in background thread.") + self.should_exit = False + self.thread = threading.Thread(target=self.run, daemon=True) + self.thread.start() + self.is_running = True + bt.logging.debug("Started") + + def stop_run_thread(self): + """ + Stops the miner's operations that are running in the background thread. + """ + if self.is_running: + bt.logging.debug("Stopping miner in background thread.") + self.should_exit = True + self.thread.join(5) + self.is_running = False + bt.logging.debug("Stopped") + + def __enter__(self): + """ + Starts the miner's operations in a background thread upon entering the context. + This method facilitates the use of the miner in a 'with' statement. + """ + self.run_in_background_thread() + return self + + def __exit__(self, exc_type, exc_value, traceback): + """ + Stops the miner's background operations upon exiting the context. + This method facilitates the use of the miner in a 'with' statement. + + Args: + exc_type: The type of the exception that caused the context to be exited. + None if the context was exited without an exception. + exc_value: The instance of the exception that caused the context to be exited. + None if the context was exited without an exception. + traceback: A traceback object encoding the stack trace. + None if the context was exited without an exception. + """ + self.stop_run_thread() + + def set_weights(self): + """ + Self-assigns a weight of 1 to the current miner (identified by its UID) and + a weight of 0 to all other peers in the network. The weights determine the trust level the miner assigns to other nodes on the network. + + Raises: + Exception: If there's an error while setting weights, the exception is logged for diagnosis. 
+ """ + try: + # --- query the chain for the most current number of peers on the network + chain_weights = torch.zeros( + self.subtensor.subnetwork_n(netuid=self.metagraph.netuid) + ) + chain_weights[self.uid] = 1 + + # --- Set weights. + self.subtensor.set_weights( + wallet=self.wallet, + netuid=self.metagraph.netuid, + uids=torch.arange(0, len(chain_weights)), + weights=chain_weights.to("cpu"), + wait_for_inclusion=False, + version_key=self.spec_version, + ) + + except Exception as e: + bt.logging.error( + f"Failed to set weights on chain with exception: { e }" + ) + + bt.logging.info(f"Set weights: {chain_weights}") + + def resync_metagraph(self): + """Resyncs the metagraph and updates the hotkeys and moving averages based on the new metagraph.""" + bt.logging.info("resync_metagraph()") + + # Sync the metagraph. + self.metagraph.sync(subtensor=self.subtensor) diff --git a/template/base/neuron.py b/template/base/neuron.py new file mode 100644 index 0000000..d3cbbc6 --- /dev/null +++ b/template/base/neuron.py @@ -0,0 +1,175 @@ +# The MIT License (MIT) +# Copyright © 2023 Yuma Rao + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. + +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +import copy +import typing + +import bittensor as bt + +from abc import ABC, abstractmethod + +# Sync calls set weights and also resyncs the metagraph. +from template.utils.config import check_config, add_args, config +from template.utils.misc import ttl_get_block +from template import __spec_version__ as spec_version +from template.mock import MockSubtensor, MockMetagraph + + +class BaseNeuron(ABC): + """ + Base class for Bittensor miners. This class is abstract and should be inherited by a subclass. It contains the core logic for all neurons; validators and miners. + + In addition to creating a wallet, subtensor, and metagraph, this class also handles the synchronization of the network state via a basic checkpointing mechanism based on epoch length. + """ + + @classmethod + def check_config(cls, config: "bt.Config"): + check_config(cls, config) + + @classmethod + def add_args(cls, parser): + add_args(cls, parser) + + @classmethod + def config(cls): + return config(cls) + + subtensor: "bt.subtensor" + wallet: "bt.wallet" + metagraph: "bt.metagraph" + spec_version: int = spec_version + + @property + def block(self): + return ttl_get_block(self) + + def __init__(self, config=None): + base_config = copy.deepcopy(config or BaseNeuron.config()) + self.config = self.config() + self.config.merge(base_config) + self.check_config(self.config) + + # Set up logging with the provided configuration and directory. + bt.logging(config=self.config, logging_dir=self.config.full_path) + + # If a gpu is required, set the device to cuda:N (e.g. cuda:0) + self.device = self.config.neuron.device + + # Log the configuration for reference. 
+ bt.logging.info(self.config) + + # Build Bittensor objects + # These are core Bittensor classes to interact with the network. + bt.logging.info("Setting up bittensor objects.") + + # The wallet holds the cryptographic key pairs for the miner. + if self.config.mock: + self.wallet = bt.MockWallet(config=self.config) + self.subtensor = MockSubtensor( + self.config.netuid, wallet=self.wallet + ) + self.metagraph = MockMetagraph( + self.config.netuid, subtensor=self.subtensor + ) + else: + self.wallet = bt.wallet(config=self.config) + self.subtensor = bt.subtensor(config=self.config) + self.metagraph = self.subtensor.metagraph(self.config.netuid) + + bt.logging.info(f"Wallet: {self.wallet}") + bt.logging.info(f"Subtensor: {self.subtensor}") + bt.logging.info(f"Metagraph: {self.metagraph}") + + # Check if the miner is registered on the Bittensor network before proceeding further. + self.check_registered() + + # Each miner gets a unique identity (UID) in the network for differentiation. + self.uid = self.metagraph.hotkeys.index( + self.wallet.hotkey.ss58_address + ) + bt.logging.info( + f"Running neuron on subnet: {self.config.netuid} with uid {self.uid} using network: {self.subtensor.chain_endpoint}" + ) + self.step = 0 + + @abstractmethod + async def forward(self, synapse: bt.Synapse) -> bt.Synapse: + ... + + @abstractmethod + def run(self): + ... + + def sync(self): + """ + Wrapper for synchronizing the state of the network for the given miner or validator. + """ + # Ensure miner or validator hotkey is still registered on the network. + self.check_registered() + + if self.should_sync_metagraph(): + self.resync_metagraph() + + if self.should_set_weights(): + self.set_weights() + + # Always save state. + self.save_state() + + def check_registered(self): + # --- Check for registration. 
+ if not self.subtensor.is_hotkey_registered( + netuid=self.config.netuid, + hotkey_ss58=self.wallet.hotkey.ss58_address, + ): + bt.logging.error( + f"Wallet: {self.wallet} is not registered on netuid {self.config.netuid}." + f" Please register the hotkey using `btcli subnets register` before trying again" + ) + exit() + + def should_sync_metagraph(self): + """ + Check if enough epoch blocks have elapsed since the last checkpoint to sync. + """ + return ( + self.block - self.metagraph.last_update[self.uid] + ) > self.config.neuron.epoch_length + + def should_set_weights(self) -> bool: + # Don't set weights on initialization. + if self.step == 0: + return False + + # Check if enough epoch blocks have elapsed since the last epoch. + if self.config.neuron.disable_set_weights: + return False + + # Define appropriate logic for when set weights. + return ( + self.block - self.metagraph.last_update[self.uid] + ) > self.config.neuron.epoch_length + + def save_state(self): + bt.logging.warning( + "save_state() not implemented for this neuron. You can implement this function to save model checkpoints or other useful data." + ) + + def load_state(self): + bt.logging.warning( + "load_state() not implemented for this neuron. You can implement this function to load model checkpoints or other useful data." 
+ ) diff --git a/template/base/validator.py b/template/base/validator.py new file mode 100644 index 0000000..2c030db --- /dev/null +++ b/template/base/validator.py @@ -0,0 +1,362 @@ +# The MIT License (MIT) +# Copyright © 2023 Yuma Rao +# TODO(developer): Set your name +# Copyright © 2023 + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. + +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + + +import copy +import torch +import asyncio +import argparse +import threading +import bittensor as bt + +from typing import List +from traceback import print_exception + +from template.base.neuron import BaseNeuron +from template.mock import MockDendrite +from template.utils.config import add_validator_args + + +class BaseValidatorNeuron(BaseNeuron): + """ + Base class for Bittensor validators. Your validator should inherit from this class. 
+ """ + + @classmethod + def add_args(cls, parser: argparse.ArgumentParser): + super().add_args(parser) + add_validator_args(cls, parser) + + def __init__(self, config=None): + super().__init__(config=config) + + # Save a copy of the hotkeys to local memory. + self.hotkeys = copy.deepcopy(self.metagraph.hotkeys) + + # Dendrite lets us send messages to other nodes (axons) in the network. + if self.config.mock: + self.dendrite = MockDendrite(wallet=self.wallet) + else: + self.dendrite = bt.dendrite(wallet=self.wallet) + bt.logging.info(f"Dendrite: {self.dendrite}") + + # Set up initial scoring weights for validation + bt.logging.info("Building validation weights.") + self.scores = torch.zeros( + self.metagraph.n, dtype=torch.float32, device=self.device + ) + + # Init sync with the network. Updates the metagraph. + self.sync() + + # Serve axon to enable external connections. + if not self.config.neuron.axon_off: + self.serve_axon() + else: + bt.logging.warning("axon off, not serving ip to chain.") + + # Create asyncio event loop to manage async tasks. 
+ self.loop = asyncio.get_event_loop() + + # Instantiate runners + self.should_exit: bool = False + self.is_running: bool = False + self.thread: threading.Thread = None + self.lock = asyncio.Lock() + + def serve_axon(self): + """Serve axon to enable external connections.""" + + bt.logging.info("serving ip to chain...") + try: + self.axon = bt.axon(wallet=self.wallet, config=self.config) + + try: + self.subtensor.serve_axon( + netuid=self.config.netuid, + axon=self.axon, + ) + bt.logging.info( + f"Running validator {self.axon} on network: {self.config.subtensor.chain_endpoint} with netuid: {self.config.netuid}" + ) + except Exception as e: + bt.logging.error(f"Failed to serve Axon with exception: {e}") + pass + + except Exception as e: + bt.logging.error( + f"Failed to create Axon initialize with exception: {e}" + ) + pass + + async def concurrent_forward(self): + coroutines = [ + self.forward() + for _ in range(self.config.neuron.num_concurrent_forwards) + ] + await asyncio.gather(*coroutines) + + def run(self): + """ + Initiates and manages the main loop for the miner on the Bittensor network. The main loop handles graceful shutdown on keyboard interrupts and logs unforeseen errors. + + This function performs the following primary tasks: + 1. Check for registration on the Bittensor network. + 2. Continuously forwards queries to the miners on the network, rewarding their responses and updating the scores accordingly. + 3. Periodically resynchronizes with the chain; updating the metagraph with the latest network state and setting weights. + + The essence of the validator's operations is in the forward function, which is called every step. The forward function is responsible for querying the network and scoring the responses. + + Note: + - The function leverages the global configurations set during the initialization of the miner. + - The miner's axon serves as its interface to the Bittensor network, handling incoming and outgoing requests. 
+ + Raises: + KeyboardInterrupt: If the miner is stopped by a manual interruption. + Exception: For unforeseen errors during the miner's operation, which are logged for diagnosis. + """ + + # Check that validator is registered on the network. + self.sync() + + bt.logging.info(f"Validator starting at block: {self.block}") + + # This loop maintains the validator's operations until intentionally stopped. + try: + while True: + bt.logging.info(f"step({self.step}) block({self.block})") + + # Run multiple forwards concurrently. + self.loop.run_until_complete(self.concurrent_forward()) + + # Check if we should exit. + if self.should_exit: + break + + # Sync metagraph and potentially set weights. + self.sync() + + self.step += 1 + + # If someone intentionally stops the validator, it'll safely terminate operations. + except KeyboardInterrupt: + self.axon.stop() + bt.logging.success("Validator killed by keyboard interrupt.") + exit() + + # In case of unforeseen errors, the validator will log the error and continue operations. + except Exception as err: + bt.logging.error("Error during validation", str(err)) + bt.logging.debug( + print_exception(type(err), err, err.__traceback__) + ) + + def run_in_background_thread(self): + """ + Starts the validator's operations in a background thread upon entering the context. + This method facilitates the use of the validator in a 'with' statement. + """ + if not self.is_running: + bt.logging.debug("Starting validator in background thread.") + self.should_exit = False + self.thread = threading.Thread(target=self.run, daemon=True) + self.thread.start() + self.is_running = True + bt.logging.debug("Started") + + def stop_run_thread(self): + """ + Stops the validator's operations that are running in the background thread. 
+ """ + if self.is_running: + bt.logging.debug("Stopping validator in background thread.") + self.should_exit = True + self.thread.join(5) + self.is_running = False + bt.logging.debug("Stopped") + + def __enter__(self): + self.run_in_background_thread() + return self + + def __exit__(self, exc_type, exc_value, traceback): + """ + Stops the validator's background operations upon exiting the context. + This method facilitates the use of the validator in a 'with' statement. + + Args: + exc_type: The type of the exception that caused the context to be exited. + None if the context was exited without an exception. + exc_value: The instance of the exception that caused the context to be exited. + None if the context was exited without an exception. + traceback: A traceback object encoding the stack trace. + None if the context was exited without an exception. + """ + if self.is_running: + bt.logging.debug("Stopping validator in background thread.") + self.should_exit = True + self.thread.join(5) + self.is_running = False + bt.logging.debug("Stopped") + + def set_weights(self): + """ + Sets the validator weights to the metagraph hotkeys based on the scores it has received from the miners. The weights determine the trust and incentive level the validator assigns to miner nodes on the network. + """ + + # Check if self.scores contains any NaN values and log a warning if it does. + if torch.isnan(self.scores).any(): + bt.logging.warning( + f"Scores contain NaN values. This may be due to a lack of responses from miners, or a bug in your reward functions." + ) + + # Calculate the average reward for each uid across non-zero values. + # Replace any NaN values with 0. + raw_weights = torch.nn.functional.normalize(self.scores, p=1, dim=0) + + bt.logging.debug("raw_weights", raw_weights) + bt.logging.debug("raw_weight_uids", self.metagraph.uids.to("cpu")) + # Process the raw weights to final_weights via subtensor limitations. 
+ ( + processed_weight_uids, + processed_weights, + ) = bt.utils.weight_utils.process_weights_for_netuid( + uids=self.metagraph.uids.to("cpu"), + weights=raw_weights.to("cpu"), + netuid=self.config.netuid, + subtensor=self.subtensor, + metagraph=self.metagraph, + ) + bt.logging.debug("processed_weights", processed_weights) + bt.logging.debug("processed_weight_uids", processed_weight_uids) + + # Convert to uint16 weights and uids. + ( + uint_uids, + uint_weights, + ) = bt.utils.weight_utils.convert_weights_and_uids_for_emit( + uids=processed_weight_uids, weights=processed_weights + ) + bt.logging.debug("uint_weights", uint_weights) + bt.logging.debug("uint_uids", uint_uids) + + # Set the weights on chain via our subtensor connection. + result = self.subtensor.set_weights( + wallet=self.wallet, + netuid=self.config.netuid, + uids=uint_uids, + weights=uint_weights, + wait_for_finalization=False, + wait_for_inclusion=False, + version_key=self.spec_version, + ) + if result is True: + bt.logging.info("set_weights on chain successfully!") + else: + bt.logging.error("set_weights failed") + + def resync_metagraph(self): + """Resyncs the metagraph and updates the hotkeys and moving averages based on the new metagraph.""" + bt.logging.info("resync_metagraph()") + + # Copies state of metagraph before syncing. + previous_metagraph = copy.deepcopy(self.metagraph) + + # Sync the metagraph. + self.metagraph.sync(subtensor=self.subtensor) + + # Check if the metagraph axon info has changed. + if previous_metagraph.axons == self.metagraph.axons: + return + + bt.logging.info( + "Metagraph updated, re-syncing hotkeys, dendrite pool and moving averages" + ) + # Zero out all hotkeys that have been replaced. + for uid, hotkey in enumerate(self.hotkeys): + if hotkey != self.metagraph.hotkeys[uid]: + self.scores[uid] = 0 # hotkey has been replaced + + # Check to see if the metagraph has changed size. + # If so, we need to add new hotkeys and moving averages. 
+ if len(self.hotkeys) < len(self.metagraph.hotkeys): + # Update the size of the moving average scores. + new_moving_average = torch.zeros((self.metagraph.n)).to( + self.device + ) + min_len = min(len(self.hotkeys), len(self.scores)) + new_moving_average[:min_len] = self.scores[:min_len] + self.scores = new_moving_average + + # Update the hotkeys. + self.hotkeys = copy.deepcopy(self.metagraph.hotkeys) + + def update_scores(self, rewards: torch.FloatTensor, uids: List[int]): + """Performs exponential moving average on the scores based on the rewards received from the miners.""" + + # Check if rewards contains NaN values. + if torch.isnan(rewards).any(): + bt.logging.warning(f"NaN values detected in rewards: {rewards}") + # Replace any NaN values in rewards with 0. + rewards = torch.nan_to_num(rewards, 0) + + # Check if `uids` is already a tensor and clone it to avoid the warning. + if isinstance(uids, torch.Tensor): + uids_tensor = uids.clone().detach() + else: + uids_tensor = torch.tensor(uids).to(self.device) + + # Compute forward pass rewards, assumes uids are mutually exclusive. + # shape: [ metagraph.n ] + scattered_rewards: torch.FloatTensor = self.scores.scatter( + 0, uids_tensor, rewards + ).to(self.device) + bt.logging.debug(f"Scattered rewards: {rewards}") + + # Update scores with rewards produced by this step. + # shape: [ metagraph.n ] + alpha: float = self.config.neuron.moving_average_alpha + self.scores: torch.FloatTensor = alpha * scattered_rewards + ( + 1 - alpha + ) * self.scores.to(self.device) + bt.logging.debug(f"Updated moving avg scores: {self.scores}") + + def save_state(self): + """Saves the state of the validator to a file.""" + bt.logging.info("Saving validator state.") + + # Save the state of the validator to file. 
+ torch.save( + { + "step": self.step, + "scores": self.scores, + "hotkeys": self.hotkeys, + }, + self.config.neuron.full_path + "/state.pt", + ) + + def load_state(self): + """Loads the state of the validator from a file.""" + bt.logging.info("Loading validator state.") + + # Load the state of the validator from file. + state = torch.load(self.config.neuron.full_path + "/state.pt") + self.step = state["step"] + self.scores = state["scores"] + self.hotkeys = state["hotkeys"] diff --git a/template/mock.py b/template/mock.py new file mode 100644 index 0000000..69eb78d --- /dev/null +++ b/template/mock.py @@ -0,0 +1,121 @@ +import time + +import asyncio +import random +import bittensor as bt + +from typing import List + + +class MockSubtensor(bt.MockSubtensor): + def __init__(self, netuid, n=16, wallet=None, network="mock"): + super().__init__(network=network) + + if not self.subnet_exists(netuid): + self.create_subnet(netuid) + + # Register ourself (the validator) as a neuron at uid=0 + if wallet is not None: + self.force_register_neuron( + netuid=netuid, + hotkey=wallet.hotkey.ss58_address, + coldkey=wallet.coldkey.ss58_address, + balance=100000, + stake=100000, + ) + + # Register n mock neurons who will be miners + for i in range(1, n + 1): + self.force_register_neuron( + netuid=netuid, + hotkey=f"miner-hotkey-{i}", + coldkey="mock-coldkey", + balance=100000, + stake=100000, + ) + + +class MockMetagraph(bt.metagraph): + def __init__(self, netuid=1, network="mock", subtensor=None): + super().__init__( + netuid=netuid, network=network, sync=False + ) + + if subtensor is not None: + self.subtensor = subtensor + self.sync(subtensor=subtensor) + + for axon in self.axons: + axon.ip = "127.0.0.0" + axon.port = 8091 + + bt.logging.info(f"Metagraph: {self}") + bt.logging.info(f"Axons: {self.axons}") + + +class MockDendrite(bt.dendrite): + """ + Replaces a real bittensor network request with a mock request that just returns some static response for all axons that are passed 
and adds some random delay. + """ + def __init__(self, wallet): + super().__init__(wallet) + + async def forward( + self, + axons: List[bt.axon], + synapse: bt.Synapse = bt.Synapse(), + timeout: float = 12, + deserialize: bool = True, + run_async: bool = True, + streaming: bool = False, + ): + + if streaming: + raise NotImplementedError("Streaming not implemented yet.") + + async def query_all_axons(streaming: bool): + """Queries all axons for responses.""" + + async def single_axon_response(i, axon): + """Queries a single axon for a response.""" + + start_time = time.time() + s = synapse.copy() + # Attach some more required data so it looks real + s = self.preprocess_synapse_for_request(axon, s, timeout) + # We just want to mock the response, so we'll just fill in some data + process_time = random.random() + if process_time < timeout: + s.dendrite.process_time = str(time.time() - start_time) + # Update the status code and status message of the dendrite to match the axon + # TODO (developer): replace with your own expected synapse data + s.dummy_output = s.dummy_input * 2 + s.dendrite.status_code = 200 + s.dendrite.status_message = "OK" + synapse.dendrite.process_time = str(process_time) + else: + s.dummy_output = 0 + s.dendrite.status_code = 408 + s.dendrite.status_message = "Timeout" + synapse.dendrite.process_time = str(timeout) + + # Return the updated synapse object after deserializing if requested + if deserialize: + return s.deserialize() + else: + return s + + return await asyncio.gather( + *(single_axon_response(i, target_axon) for i, target_axon in enumerate(axons)) + ) + + return await query_all_axons(streaming) + + def __str__(self) -> str: + """ + Returns a string representation of the Dendrite object. + + Returns: + str: The string representation of the Dendrite object in the format "dendrite()". 
+ """ + return "MockDendrite({})".format(self.keypair.ss58_address) \ No newline at end of file diff --git a/template/protocol.py b/template/protocol.py new file mode 100644 index 0000000..b7c50b9 --- /dev/null +++ b/template/protocol.py @@ -0,0 +1,76 @@ +# The MIT License (MIT) +# Copyright © 2023 Yuma Rao +# TODO(developer): Set your name +# Copyright © 2023 + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. + +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +import typing +import bittensor as bt + +# TODO(developer): Rewrite with your protocol definition. + +# This is the protocol for the dummy miner and validator. +# It is a simple request-response protocol where the validator sends a request +# to the miner, and the miner responds with a dummy response. 
+ +# ---- miner ---- +# Example usage: +# def dummy( synapse: Dummy ) -> Dummy: +# synapse.dummy_output = synapse.dummy_input + 1 +# return synapse +# axon = bt.axon().attach( dummy ).serve(netuid=...).start() + +# ---- validator --- +# Example usage: +# dendrite = bt.dendrite() +# dummy_output = dendrite.query( Dummy( dummy_input = 1 ) ) +# assert dummy_output == 2 + + +class Dummy(bt.Synapse): + """ + A simple dummy protocol representation which uses bt.Synapse as its base. + This protocol helps in handling dummy request and response communication between + the miner and the validator. + + Attributes: + - dummy_input: An integer value representing the input request sent by the validator. + - dummy_output: An optional integer value which, when filled, represents the response from the miner. + """ + + # Required request input, filled by sending dendrite caller. + dummy_input: int + + # Optional request output, filled by recieving axon. + dummy_output: typing.Optional[int] = None + + def deserialize(self) -> int: + """ + Deserialize the dummy output. This method retrieves the response from + the miner in the form of dummy_output, deserializes it and returns it + as the output of the dendrite.query() call. + + Returns: + - int: The deserialized response, which in this case is the value of dummy_output. + + Example: + Assuming a Dummy instance has a dummy_output value of 5: + >>> dummy_instance = Dummy(dummy_input=4) + >>> dummy_instance.dummy_output = 5 + >>> dummy_instance.deserialize() + 5 + """ + return self.dummy_output diff --git a/template/utils/__init__.py b/template/utils/__init__.py new file mode 100644 index 0000000..1e61220 --- /dev/null +++ b/template/utils/__init__.py @@ -0,0 +1,3 @@ +from . import config +from . import misc +from . 
import uids diff --git a/template/utils/config.py b/template/utils/config.py new file mode 100644 index 0000000..ab4a77e --- /dev/null +++ b/template/utils/config.py @@ -0,0 +1,249 @@ +# The MIT License (MIT) +# Copyright © 2023 Yuma Rao +# Copyright © 2023 Opentensor Foundation + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. + +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +import os +import torch +import argparse +import bittensor as bt +from loguru import logger + + +def check_config(cls, config: "bt.Config"): + r"""Checks/validates the config namespace object.""" + bt.logging.check_config(config) + + full_path = os.path.expanduser( + "{}/{}/{}/netuid{}/{}".format( + config.logging.logging_dir, # TODO: change from ~/.bittensor/miners to ~/.bittensor/neurons + config.wallet.name, + config.wallet.hotkey, + config.netuid, + config.neuron.name, + ) + ) + print("full path:", full_path) + config.neuron.full_path = os.path.expanduser(full_path) + if not os.path.exists(config.neuron.full_path): + os.makedirs(config.neuron.full_path, exist_ok=True) + + if not config.neuron.dont_save_events: + # Add custom event logger for the events. + logger.level("EVENTS", no=38, icon="📝") + logger.add( + os.path.join(config.neuron.full_path, "events.log"), + rotation=config.neuron.events_retention_size, + serialize=True, + enqueue=True, + backtrace=False, + diagnose=False, + level="EVENTS", + format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}", + ) + + +def add_args(cls, parser): + """ + Adds relevant arguments to the parser for operation. 
+ """ + + parser.add_argument("--netuid", type=int, help="Subnet netuid", default=1) + + parser.add_argument( + "--neuron.device", + type=str, + help="Device to run on.", + default="cuda" if torch.cuda.is_available() else "cpu", + ) + + parser.add_argument( + "--neuron.epoch_length", + type=int, + help="The default epoch length (how often we set weights, measured in 12 second blocks).", + default=100, + ) + + parser.add_argument( + "--mock", + action="store_true", + help="Mock neuron and all network components.", + default=False, + ) + + parser.add_argument( + "--neuron.events_retention_size", + type=str, + help="Events retention size.", + default="2 GB", + ) + + parser.add_argument( + "--neuron.dont_save_events", + action="store_true", + help="If set, we dont save events to a log file.", + default=False, + ) + + parser.add_argument( + "--wandb.off", + action="store_true", + help="Turn off wandb.", + default=False, + ) + + parser.add_argument( + "--wandb.offline", + action="store_true", + help="Runs wandb in offline mode.", + default=False, + ) + + parser.add_argument( + "--wandb.notes", + type=str, + help="Notes to add to the wandb run.", + default="", + ) + + +def add_miner_args(cls, parser): + """Add miner specific arguments to the parser.""" + + parser.add_argument( + "--neuron.name", + type=str, + help="Trials for this neuron go in neuron.root / (wallet_cold - wallet_hot) / neuron.name. ", + default="miner", + ) + + parser.add_argument( + "--blacklist.force_validator_permit", + action="store_true", + help="If set, we will force incoming requests to have a permit.", + default=False, + ) + + parser.add_argument( + "--blacklist.allow_non_registered", + action="store_true", + help="If set, miners will accept queries from non registered entities. 
(Dangerous!)", + default=False, + ) + + parser.add_argument( + "--wandb.project_name", + type=str, + default="template-miners", + help="Wandb project to log to.", + ) + + parser.add_argument( + "--wandb.entity", + type=str, + default="opentensor-dev", + help="Wandb entity to log to.", + ) + + +def add_validator_args(cls, parser): + """Add validator specific arguments to the parser.""" + + parser.add_argument( + "--neuron.name", + type=str, + help="Trials for this neuron go in neuron.root / (wallet_cold - wallet_hot) / neuron.name. ", + default="validator", + ) + + parser.add_argument( + "--neuron.timeout", + type=float, + help="The timeout for each forward call in seconds.", + default=10, + ) + + parser.add_argument( + "--neuron.num_concurrent_forwards", + type=int, + help="The number of concurrent forwards running at any time.", + default=1, + ) + + parser.add_argument( + "--neuron.sample_size", + type=int, + help="The number of miners to query in a single step.", + default=50, + ) + + parser.add_argument( + "--neuron.disable_set_weights", + action="store_true", + help="Disables setting weights.", + default=False, + ) + + parser.add_argument( + "--neuron.moving_average_alpha", + type=float, + help="Moving average alpha parameter, how much to add of the new observation.", + default=0.1, + ) + + parser.add_argument( + "--neuron.axon_off", + "--axon_off", + action="store_true", + # Note: the validator needs to serve an Axon with their IP or they may + # be blacklisted by the firewall of serving peers on the network. 
+ help="Set this flag to not attempt to serve an Axon.", + default=False, + ) + + parser.add_argument( + "--neuron.vpermit_tao_limit", + type=int, + help="The maximum number of TAO allowed to query a validator with a vpermit.", + default=4096, + ) + + parser.add_argument( + "--wandb.project_name", + type=str, + help="The name of the project where you are sending the new run.", + default="template-validators", + ) + + parser.add_argument( + "--wandb.entity", + type=str, + help="The name of the project where you are sending the new run.", + default="opentensor-dev", + ) + + +def config(cls): + """ + Returns the configuration object specific to this miner or validator after adding relevant arguments. + """ + parser = argparse.ArgumentParser() + bt.wallet.add_args(parser) + bt.subtensor.add_args(parser) + bt.logging.add_args(parser) + bt.axon.add_args(parser) + cls.add_args(parser) + return bt.config(parser) diff --git a/template/utils/misc.py b/template/utils/misc.py new file mode 100644 index 0000000..80b4e61 --- /dev/null +++ b/template/utils/misc.py @@ -0,0 +1,112 @@ +# The MIT License (MIT) +# Copyright © 2023 Yuma Rao +# Copyright © 2023 Opentensor Foundation + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. + +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +import time +import math +import hashlib as rpccheckhealth +from math import floor +from typing import Callable, Any +from functools import lru_cache, update_wrapper + + +# LRU Cache with TTL +def ttl_cache(maxsize: int = 128, typed: bool = False, ttl: int = -1): + """ + Decorator that creates a cache of the most recently used function calls with a time-to-live (TTL) feature. + The cache evicts the least recently used entries if the cache exceeds the `maxsize` or if an entry has + been in the cache longer than the `ttl` period. + + Args: + maxsize (int): Maximum size of the cache. Once the cache grows to this size, subsequent entries + replace the least recently used ones. Defaults to 128. + typed (bool): If set to True, arguments of different types will be cached separately. For example, + f(3) and f(3.0) will be treated as distinct calls with distinct results. Defaults to False. + ttl (int): The time-to-live for each cache entry, measured in seconds. If set to a non-positive value, + the TTL is set to a very large number, effectively making the cache entries permanent. Defaults to -1. + + Returns: + Callable: A decorator that can be applied to functions to cache their return values. + + The decorator is useful for caching results of functions that are expensive to compute and are called + with the same arguments frequently within short periods of time. The TTL feature helps in ensuring + that the cached values are not stale. 
+ + Example: + @ttl_cache(ttl=10) + def get_data(param): + # Expensive data retrieval operation + return data + """ + if ttl <= 0: + ttl = 65536 + hash_gen = _ttl_hash_gen(ttl) + + def wrapper(func: Callable) -> Callable: + @lru_cache(maxsize, typed) + def ttl_func(ttl_hash, *args, **kwargs): + return func(*args, **kwargs) + + def wrapped(*args, **kwargs) -> Any: + th = next(hash_gen) + return ttl_func(th, *args, **kwargs) + + return update_wrapper(wrapped, func) + + return wrapper + + +def _ttl_hash_gen(seconds: int): + """ + Internal generator function used by the `ttl_cache` decorator to generate a new hash value at regular + time intervals specified by `seconds`. + + Args: + seconds (int): The number of seconds after which a new hash value will be generated. + + Yields: + int: A hash value that represents the current time interval. + + This generator is used to create time-based hash values that enable the `ttl_cache` to determine + whether cached entries are still valid or if they have expired and should be recalculated. + """ + start_time = time.time() + while True: + yield floor((time.time() - start_time) / seconds) + + +# 12 seconds updating block. +@ttl_cache(maxsize=1, ttl=12) +def ttl_get_block(self) -> int: + """ + Retrieves the current block number from the blockchain. This method is cached with a time-to-live (TTL) + of 12 seconds, meaning that it will only refresh the block number from the blockchain at most every 12 seconds, + reducing the number of calls to the underlying blockchain interface. + + Returns: + int: The current block number on the blockchain. + + This method is useful for applications that need to access the current block number frequently and can + tolerate a delay of up to 12 seconds for the latest information. By using a cache with TTL, the method + efficiently reduces the workload on the blockchain interface. 
+ + Example: + current_block = ttl_get_block(self) + + Note: self here is the miner or validator instance + """ + return self.subtensor.get_current_block() diff --git a/template/utils/uids.py b/template/utils/uids.py new file mode 100644 index 0000000..ce78c80 --- /dev/null +++ b/template/utils/uids.py @@ -0,0 +1,63 @@ +import torch +import random +import bittensor as bt +from typing import List + + +def check_uid_availability( + metagraph: "bt.metagraph.Metagraph", uid: int, vpermit_tao_limit: int +) -> bool: + """Check if uid is available. The UID should be available if it is serving and has less than vpermit_tao_limit stake + Args: + metagraph (:obj: bt.metagraph.Metagraph): Metagraph object + uid (int): uid to be checked + vpermit_tao_limit (int): Validator permit tao limit + Returns: + bool: True if uid is available, False otherwise + """ + # Filter non serving axons. + if not metagraph.axons[uid].is_serving: + return False + # Filter validator permit > 1024 stake. + if metagraph.validator_permit[uid]: + if metagraph.S[uid] > vpermit_tao_limit: + return False + # Available otherwise. + return True + + +def get_random_uids( + self, k: int, exclude: List[int] = None +) -> torch.LongTensor: + """Returns k available random uids from the metagraph. + Args: + k (int): Number of uids to return. + exclude (List[int]): List of uids to exclude from the random sampling. + Returns: + uids (torch.LongTensor): Randomly sampled available uids. + Notes: + If `k` is larger than the number of available `uids`, set `k` to the number of available `uids`. 
+ """ + candidate_uids = [] + avail_uids = [] + + for uid in range(self.metagraph.n.item()): + uid_is_available = check_uid_availability( + self.metagraph, uid, self.config.neuron.vpermit_tao_limit + ) + uid_is_not_excluded = exclude is None or uid not in exclude + + if uid_is_available: + avail_uids.append(uid) + if uid_is_not_excluded: + candidate_uids.append(uid) + + # Check if candidate_uids contain enough for querying, if not grab all avaliable uids + available_uids = candidate_uids + if len(candidate_uids) < k: + available_uids += random.sample( + [uid for uid in avail_uids if uid not in candidate_uids], + k - len(candidate_uids), + ) + uids = torch.tensor(random.sample(available_uids, k)) + return uids diff --git a/template/validator/__init__.py b/template/validator/__init__.py new file mode 100644 index 0000000..e43fa85 --- /dev/null +++ b/template/validator/__init__.py @@ -0,0 +1,2 @@ +from .forward import forward +from .reward import reward diff --git a/template/validator/forward.py b/template/validator/forward.py new file mode 100644 index 0000000..e269023 --- /dev/null +++ b/template/validator/forward.py @@ -0,0 +1,61 @@ +# The MIT License (MIT) +# Copyright © 2023 Yuma Rao +# TODO(developer): Set your name +# Copyright © 2023 + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. 
+ +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +import bittensor as bt + +from template.protocol import Dummy +from template.validator.reward import get_rewards +from template.utils.uids import get_random_uids + + +async def forward(self): + """ + The forward function is called by the validator every time step. + + It is responsible for querying the network and scoring the responses. + + Args: + self (:obj:`bittensor.neuron.Neuron`): The neuron object which contains all the necessary state for the validator. + + """ + # TODO(developer): Define how the validator selects a miner to query, how often, etc. + # get_random_uids is an example method, but you can replace it with your own. + miner_uids = get_random_uids(self, k=self.config.neuron.sample_size) + + # The dendrite client queries the network. + responses = await self.dendrite( + # Send the query to selected miner axons in the network. + axons=[self.metagraph.axons[uid] for uid in miner_uids], + # Construct a dummy query. This simply contains a single integer. + synapse=Dummy(dummy_input=self.step), + # All responses have the deserialize function called on them before returning. + # You are encouraged to define your own deserialization function. + deserialize=True, + ) + + # Log the results for monitoring purposes. + bt.logging.info(f"Received responses: {responses}") + + # TODO(developer): Define how the validator scores responses. + # Adjust the scores based on responses from miners. 
+ rewards = get_rewards(self, query=self.step, responses=responses) + + bt.logging.info(f"Scored responses: {rewards}") + # Update the scores based on the rewards. You may want to define your own update_scores function for custom behavior. + self.update_scores(rewards, miner_uids) diff --git a/template/validator/reward.py b/template/validator/reward.py new file mode 100644 index 0000000..ab2d435 --- /dev/null +++ b/template/validator/reward.py @@ -0,0 +1,54 @@ +# The MIT License (MIT) +# Copyright © 2023 Yuma Rao +# TODO(developer): Set your name +# Copyright © 2023 + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. + +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +import torch +from typing import List + + +def reward(query: int, response: int) -> float: + """ + Reward the miner response to the dummy request. This method returns a reward + value for the miner, which is used to update the miner's score. + + Returns: + - float: The reward value for the miner. 
+    """
+
+    # Exact match to the doubled query earns full reward; anything else earns none.
+    # Use 0.0 (not 0) so both branches return a float, matching the declared -> float.
+    return 1.0 if response == query * 2 else 0.0
+
+
+def get_rewards(
+    self,
+    query: int,
+    responses: List[int],
+) -> torch.FloatTensor:
+    """
+    Returns a tensor of rewards for the given query and responses.
+
+    Args:
+    - query (int): The query sent to the miners (the validator step).
+    - responses (List[int]): A list of integer responses from the miners;
+      each is scored 1.0 if it equals query * 2, else 0.0.
+
+    Returns:
+    - torch.FloatTensor: A tensor of rewards for the given query and responses,
+      placed on self.device. Empty responses yield an empty tensor.
+    """
+    # Get all the reward results by iteratively calling your reward() function.
+    return torch.FloatTensor(
+        [reward(query, response) for response in responses]
+    ).to(self.device)
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/helpers.py b/tests/helpers.py
new file mode 100644
index 0000000..9213446
--- /dev/null
+++ b/tests/helpers.py
@@ -0,0 +1,181 @@
+# The MIT License (MIT)
+# Copyright © 2023 Opentensor Foundation
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+from typing import Union
+from bittensor import (
+    Balance,
+    NeuronInfo,
+    AxonInfo,
+    PrometheusInfo,
+    __ss58_format__,
+)
+from bittensor.mock.wallet_mock import MockWallet as _MockWallet
+from bittensor.mock.wallet_mock import get_mock_coldkey as _get_mock_coldkey
+from bittensor.mock.wallet_mock import get_mock_hotkey as _get_mock_hotkey
+from bittensor.mock.wallet_mock import get_mock_keypair as _get_mock_keypair
+from bittensor.mock.wallet_mock import get_mock_wallet as _get_mock_wallet
+
+from rich.console import Console
+from rich.text import Text
+
+
+def __mock_wallet_factory__(*args, **kwargs) -> _MockWallet:
+    """Returns a mock wallet object."""
+
+    mock_wallet = _get_mock_wallet()
+
+    return mock_wallet
+
+
+class CLOSE_IN_VALUE:
+    """Equality wrapper that compares equal to any value within +/- tolerance.
+
+    Useful in test assertions where an exact numeric match is too strict,
+    e.g. ``assert balance == CLOSE_IN_VALUE(expected, tolerance=0.1)``.
+    """
+
+    # The reference value and the allowed absolute deviation.
+    value: Union[float, int, Balance]
+    tolerance: Union[float, int, Balance]
+
+    def __init__(
+        self,
+        value: Union[float, int, Balance],
+        tolerance: Union[float, int, Balance] = 0.0,
+    ) -> None:
+        self.value = value
+        self.tolerance = tolerance
+
+    def __eq__(self, __o: Union[float, int, Balance]) -> bool:
+        # True if __o \in [value - tolerance, value + tolerance]
+        # or if value \in [__o - tolerance, __o + tolerance]
+        # Both directions are checked so the comparison is symmetric even for
+        # operand types whose arithmetic is not (e.g. Balance vs float).
+        return (
+            (self.value - self.tolerance) <= __o
+            and __o <= (self.value + self.tolerance)
+        ) or (
+            (__o - self.tolerance) <= self.value
+            and self.value <= (__o + self.tolerance)
+        )
+
+
+def get_mock_neuron(**kwargs) -> NeuronInfo:
+    """
+    Returns a mock neuron with the given kwargs overriding the default values.
+    """
+
+    # Default field values for a minimal, valid-looking NeuronInfo record.
+    mock_neuron_d = dict(
+        {
+            "netuid": -1,  # mock netuid
+            "axon_info": AxonInfo(
+                block=0,
+                version=1,
+                ip=0,
+                port=0,
+                ip_type=0,
+                protocol=0,
+                placeholder1=0,
+                placeholder2=0,
+            ),
+            "prometheus_info": PrometheusInfo(
+                block=0, version=1, ip=0, port=0, ip_type=0
+            ),
+            "validator_permit": True,
+            "uid": 1,
+            "hotkey": "some_hotkey",
+            "coldkey": "some_coldkey",
+            "active": 0,
+            "last_update": 0,
+            "stake": {"some_coldkey": 1e12},
+            "total_stake": 1e12,
+            "rank": 0.0,
+            "trust": 0.0,
+            "consensus": 0.0,
+            "validator_trust": 0.0,
+            "incentive": 0.0,
+            "dividends": 0.0,
+            "emission": 0.0,
+            "bonds": [],
+            "weights": [],
+            "stake_dict": {},
+            "pruning_score": 0.0,
+            "is_null": False,
+        }
+    )
+
+    mock_neuron_d.update(kwargs)  # update with kwargs
+
+    # If the caller overrode the coldkey but not the stake, keep the stake map
+    # keyed by the caller's coldkey instead of the default one.
+    if kwargs.get("stake") is None and kwargs.get("coldkey") is not None:
+        mock_neuron_d["stake"] = {kwargs.get("coldkey"): 1e12}
+
+    # Keep total_stake consistent with the (possibly overridden) stake map.
+    if kwargs.get("total_stake") is None:
+        mock_neuron_d["total_stake"] = sum(mock_neuron_d["stake"].values())
+
+    # NOTE(review): relies on bittensor's private _neuron_dict_to_namespace
+    # helper — may break on bittensor upgrades; verify against the pinned version.
+    mock_neuron = NeuronInfo._neuron_dict_to_namespace(mock_neuron_d)
+
+    return mock_neuron
+
+
+def get_mock_neuron_by_uid(uid: int, **kwargs) -> NeuronInfo:
+    """Returns a mock neuron whose hotkey/coldkey are derived from ``uid``."""
+    return get_mock_neuron(
+        uid=uid,
+        hotkey=_get_mock_hotkey(uid),
+        coldkey=_get_mock_coldkey(uid),
+        **kwargs
+    )
+
+
+class MockStatus:
+    """No-op stand-in for rich's status spinner; usable as a context manager."""
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        pass
+
+    def start(self):
+        pass
+
+    def stop(self):
+        pass
+
+    def update(self, *args, **kwargs):
+        # Route status updates through MockConsole so tests can capture them.
+        MockConsole().print(*args, **kwargs)
+
+
+class MockConsole:
+    """
+    Mocks the console object for status and print.
+    Captures the last print output as a string.
+    """
+
+    # Last captured output of print(); None until print() is first called.
+    captured_print = None
+
+    def status(self, *args, **kwargs):
+        return MockStatus()
+
+    def print(self, *args, **kwargs):
+        console = Console(
+            width=1000, no_color=True, markup=False
+        )  # set width to 1000 to avoid truncation
+        console.begin_capture()
+        console.print(*args, **kwargs)
+        self.captured_print = console.end_capture()
+
+    def clear(self, *args, **kwargs):
+        pass
+
+    @staticmethod
+    def remove_rich_syntax(text: str) -> str:
+        """
+        Removes rich syntax from the given text.
+        Removes markup and ansi syntax.
+        """
+        output_no_syntax = Text.from_ansi(Text.from_markup(text).plain).plain
+
+        return output_no_syntax
diff --git a/tests/test_mock.py b/tests/test_mock.py
new file mode 100644
index 0000000..e102a06
--- /dev/null
+++ b/tests/test_mock.py
@@ -0,0 +1,92 @@
+import pytest
+import asyncio
+import bittensor as bt
+# Import from this repo's template package; the original referenced the
+# non-existent `prompting` package (copied from another subnet), which makes
+# this whole module fail at import time.
+from template.mock import MockDendrite, MockMetagraph, MockSubtensor
+from template.protocol import Dummy
+
+@pytest.mark.parametrize('netuid', [1, 2, 3])
+@pytest.mark.parametrize('n', [2, 4, 8, 16, 32, 64])
+@pytest.mark.parametrize('wallet', [bt.MockWallet(), None])
+def test_mock_subtensor(netuid, n, wallet):
+    """MockSubtensor registers n neurons (plus the wallet, if given) on netuid."""
+    subtensor = MockSubtensor(netuid=netuid, n=n, wallet=wallet)
+    neurons = subtensor.neurons(netuid=netuid)
+    # Check netuid
+    assert subtensor.subnet_exists(netuid)
+    # Check network
+    assert subtensor.network == 'mock'
+    assert subtensor.chain_endpoint == 'mock_endpoint'
+    # Check number of neurons
+    assert len(neurons) == (n + 1 if wallet is not None else n)
+    # Check wallet
+    if wallet is not None:
+        assert subtensor.is_hotkey_registered(netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address)
+
+    for neuron in neurons:
+        assert isinstance(neuron, bt.NeuronInfo)
+        assert subtensor.is_hotkey_registered(netuid=netuid, hotkey_ss58=neuron.hotkey)
+
+@pytest.mark.parametrize('n', [16, 32, 64])
+def test_mock_metagraph(n):
+    """MockMetagraph exposes one axon per neuron, all at the default ip/port."""
+    mock_subtensor = MockSubtensor(netuid=1, n=n)
+    mock_metagraph = MockMetagraph(subtensor=mock_subtensor)
+    # Check axons
+    axons = mock_metagraph.axons
+    assert len(axons) == n
+    # Check ip and port
+    for axon in axons:
+        assert isinstance(axon, bt.AxonInfo)
+        assert axon.ip == mock_metagraph.default_ip
+        assert axon.port == mock_metagraph.default_port
+
+def test_mock_reward_pipeline():
+    pass
+
+def test_mock_neuron():
+    pass
+
+@pytest.mark.parametrize('timeout', [0.1, 0.2])
+@pytest.mark.parametrize('min_time', [0, 0.05, 0.1])
+@pytest.mark.parametrize('max_time', [0.1, 0.15, 0.2])
+@pytest.mark.parametrize('n', [4, 16, 64])
+def test_mock_dendrite_timings(timeout, min_time, max_time, n):
+    """MockDendrite responses respect [min_time, max_time] and timeout semantics."""
+    mock_wallet = None
+    mock_dendrite = MockDendrite(mock_wallet)
+    mock_dendrite.min_time = min_time
+    mock_dendrite.max_time = max_time
+    mock_subtensor = MockSubtensor(netuid=1, n=n)
+    mock_metagraph = MockMetagraph(subtensor=mock_subtensor)
+    axons = mock_metagraph.axons
+
+    async def run():
+        # Use this subnet's Dummy synapse: the assertions below check
+        # dummy_input/dummy_output, which PromptingSynapse does not have.
+        return await mock_dendrite(
+            axons,
+            synapse=Dummy(dummy_input=1),
+            timeout=timeout
+        )
+
+    responses = asyncio.run(run())
+    for synapse in responses:
+        assert hasattr(synapse, 'dendrite') and isinstance(synapse.dendrite, bt.TerminalInfo)
+
+        dendrite = synapse.dendrite
+        # check synapse.dendrite has (process_time, status_code, status_message)
+        for field in ('process_time', 'status_code', 'status_message'):
+            assert hasattr(dendrite, field) and getattr(dendrite, field) is not None
+
+        # check that the dendrite take between min_time and max_time
+        assert min_time <= dendrite.process_time
+        assert dendrite.process_time <= max_time + 0.1
+        # check that responses which take longer than timeout have 408 status code
+        if dendrite.process_time >= timeout + 0.1:
+            assert dendrite.status_code == 408
+            assert dendrite.status_message == 'Timeout'
+            # timed-out responses echo the input back unchanged
+            assert synapse.dummy_output == synapse.dummy_input
+        # check that responses which take less than timeout have 200 status code
+        elif dendrite.process_time < timeout:
+            assert dendrite.status_code == 200
+            assert dendrite.status_message == 'OK'
+            # check that outputs are not empty for successful responses
+            assert synapse.dummy_output == synapse.dummy_input * 2
+        # dont check for responses which take between timeout and max_time because they are not guaranteed to have a status code of 200 or 408
diff --git a/tests/test_template_validator.py b/tests/test_template_validator.py
new file mode 100644
index 0000000..5d0110a
--- /dev/null
+++ b/tests/test_template_validator.py
@@ -0,0 +1,114 @@
+# The MIT License (MIT)
+# Copyright © 2023 Yuma Rao
+# Copyright © 2023 Opentensor Foundation
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+import sys
+import torch
+import unittest
+import bittensor as bt
+
+from neurons.validator import Neuron as Validator
+from neurons.miner import Neuron as Miner
+
+from template.protocol import Dummy
+from template.validator.forward import forward
+from template.utils.uids import get_random_uids
+from template.validator.reward import get_rewards
+from template.base.validator import BaseValidatorNeuron
+
+
+class TemplateValidatorNeuronTestCase(unittest.TestCase):
+    """
+    Unit tests for the template validator neuron.
+
+    Runs a mocked validator (mock wallet/metagraph/subtensor) and exercises the
+    Dummy query/response round trip and the reward pipeline built on it.
+    """
+
+    def setUp(self):
+        # sys.argv[0] is a str; build a fresh argv list rather than adding a
+        # list to a string (the original `sys.argv[0] + [...]` raises TypeError).
+        sys.argv = [sys.argv[0], "--config", "tests/configs/validator.json"]
+
+        config = BaseValidatorNeuron.config()
+        config.wallet._mock = True
+        config.metagraph._mock = True
+        config.subtensor._mock = True
+        self.neuron = Validator(config)
+        # get_random_uids reads the neuron's metagraph, so it must receive the
+        # neuron — not this TestCase instance.
+        self.miner_uids = get_random_uids(self.neuron, k=10)
+
+    def test_run_single_step(self):
+        # TODO: Test a single step
+        pass
+
+    def test_sync_error_if_not_registered(self):
+        # TODO: Test that the validator throws an error if it is not registered on metagraph
+        pass
+
+    def test_forward(self):
+        # TODO: Test that the forward function returns the correct value
+        pass
+
+    def test_dummy_responses(self):
+        # TODO: Test that the dummy responses are correctly constructed
+
+        responses = self.neuron.dendrite.query(
+            # Send the query to miners in the network.
+            axons=[
+                self.neuron.metagraph.axons[uid] for uid in self.miner_uids
+            ],
+            # Construct a dummy query.
+            synapse=Dummy(dummy_input=self.neuron.step),
+            # All responses have the deserialize function called on them before returning.
+            deserialize=True,
+        )
+
+        # A correct (mock) miner doubles the input.
+        for i, response in enumerate(responses):
+            self.assertEqual(response, self.neuron.step * 2)
+
+    def test_reward(self):
+        # TODO: Test that the reward function returns the correct value
+        responses = self.neuron.dendrite.query(
+            # Send the query to miners in the network. setUp only defines
+            # self.neuron, so dendrite/metagraph live on the neuron.
+            axons=[self.neuron.metagraph.axons[uid] for uid in self.miner_uids],
+            # Construct a dummy query.
+            synapse=Dummy(dummy_input=self.neuron.step),
+            # All responses have the deserialize function called on them before returning.
+            deserialize=True,
+        )
+
+        # get_rewards requires the query that produced the responses.
+        rewards = get_rewards(self.neuron, query=self.neuron.step, responses=responses)
+        expected_rewards = torch.FloatTensor([1.0] * len(responses))
+        # Compare tensors with torch.equal; assertEqual on tensors is not a
+        # valid elementwise comparison.
+        self.assertTrue(torch.equal(rewards, expected_rewards))
+
+    def test_reward_with_nan(self):
+        # TODO: Test that NaN rewards are correctly sanitized
+        # TODO: Test that a bt.logging.warning is thrown when a NaN reward is sanitized
+        responses = self.neuron.dendrite.query(
+            # Send the query to miners in the network.
+            axons=[self.neuron.metagraph.axons[uid] for uid in self.miner_uids],
+            # Construct a dummy query.
+            synapse=Dummy(dummy_input=self.neuron.step),
+            # All responses have the deserialize function called on them before returning.
+            deserialize=True,
+        )
+
+        rewards = get_rewards(self.neuron, query=self.neuron.step, responses=responses)
+        expected_rewards = rewards.clone()
+        # Add NaN values to rewards
+        rewards[0] = float("nan")
+
+        with self.assertLogs(bt.logging, level="WARNING") as cm:
+            self.neuron.update_scores(rewards, self.miner_uids)