-
Notifications
You must be signed in to change notification settings - Fork 0
Backtest: Binance OrderBook data
Loren1166 edited this page Sep 23, 2024
·
4 revisions
Tutorial for NautilusTrader a high-performance algorithmic trading platform and event driven backtester.
NautilusTrader 教程,一个高性能算法交易平台和事件驱动的回测器。
View source on GitHub.
在 GitHub 上查看源代码。
We are currently working on this article.
我们目前正在撰写本文。
This tutorial runs through how to set up the data catalog and a `BacktestNode` to backtest an `OrderBookImbalance` strategy on order book data. This example requires you to bring your own Binance order book data.
本教程介绍如何设置数据目录和 `BacktestNode`,以便在订单簿数据上回测 `OrderBookImbalance` 策略。此示例要求您自带 Binance 订单簿数据。
- Python 3.10+ installed 安装 Python 3.10+
- JupyterLab or similar installed (`pip install -U jupyterlab`) 安装 JupyterLab 或类似软件 (`pip install -U jupyterlab`)
- NautilusTrader latest release installed (`pip install -U nautilus_trader`) 安装 NautilusTrader 最新版本 (`pip install -U nautilus_trader`)
We'll start with all of our imports for the remainder of this guide:
我们将从本指南其余部分的所有导入开始:
import os
import shutil
from decimal import Decimal
from pathlib import Path
import pandas as pd
from nautilus_trader.backtest.node import BacktestDataConfig
from nautilus_trader.backtest.node import BacktestEngineConfig
from nautilus_trader.backtest.node import BacktestNode
from nautilus_trader.backtest.node import BacktestRunConfig
from nautilus_trader.backtest.node import BacktestVenueConfig
from nautilus_trader.config import ImportableStrategyConfig
from nautilus_trader.config import LoggingConfig
from nautilus_trader.core.datetime import dt_to_unix_nanos
from nautilus_trader.model.data import OrderBookDelta
from nautilus_trader.persistence.catalog import ParquetDataCatalog
from nautilus_trader.persistence.loaders import BinanceOrderBookDeltaDataLoader
from nautilus_trader.persistence.wranglers import OrderBookDeltaDataWrangler
from nautilus_trader.test_kit.providers import TestInstrumentProvider
# Path to your data directory, using user /Downloads as an example.
DATA_DIR = "~/Downloads"

# Raw Binance order book CSV exports are expected under <DATA_DIR>/Data/Binance.
data_path = Path(DATA_DIR).expanduser() / "Data" / "Binance"
raw_files = list(data_path.iterdir())
# Use an explicit raise rather than `assert` (asserts are stripped under `python -O`),
# and name the actual data source (the original message said "histdata" in error).
if not raw_files:
    raise FileNotFoundError(f"Unable to find any Binance order book data files in directory {data_path}")
raw_files
# First we'll load the initial order book snapshot (full depth at the start of the day).
path_snap = data_path / "BTCUSDT_T_DEPTH_2022-11-01_depth_snap.csv"
df_snap = BinanceOrderBookDeltaDataLoader.load(path_snap)
df_snap.head()
# Then we'll load the order book updates; to save time here we're limiting to 1 million rows.
path_update = data_path / "BTCUSDT_T_DEPTH_2022-11-01_depth_update.csv"
nrows = 1_000_000
df_update = BinanceOrderBookDeltaDataLoader.load(path_update, nrows=nrows)
df_update.head()
# Create the test instrument matching the data, then wrangle the raw
# DataFrames into Nautilus `OrderBookDelta` objects.
BTCUSDT_BINANCE = TestInstrumentProvider.btcusdt_binance()
wrangler = OrderBookDeltaDataWrangler(BTCUSDT_BINANCE)
# Process the snapshot first so the book is seeded before updates are applied.
deltas = wrangler.process(df_snap)
deltas += wrangler.process(df_update)
deltas.sort(key=lambda x: x.ts_init)  # Ensure data is non-decreasing by `ts_init`
deltas[:10]
# Catalog location: a "catalog" directory under the current working directory.
CATALOG_PATH = os.getcwd() + "/catalog"

# Start from an empty catalog directory on every run.
catalog_dir = Path(CATALOG_PATH)
if catalog_dir.exists():
    shutil.rmtree(catalog_dir)
catalog_dir.mkdir()

# Create a catalog instance backed by the fresh directory.
catalog = ParquetDataCatalog(CATALOG_PATH)

# Write the instrument definition and the order book deltas to the catalog.
catalog.write_data([BTCUSDT_BINANCE])
catalog.write_data(deltas)

# Confirm the instrument was written.
catalog.instruments()
# Explore the available data in the catalog.
# Query bounds are UTC timestamps converted to UNIX nanoseconds,
# the time unit the catalog query API expects.
start = dt_to_unix_nanos(pd.Timestamp("2022-11-01", tz="UTC"))
end = dt_to_unix_nanos(pd.Timestamp("2022-11-04", tz="UTC"))
deltas = catalog.order_book_deltas(start=start, end=end)
print(len(deltas))
deltas[:10]
# Use the instrument we wrote to the catalog earlier.
instrument = catalog.instruments()[0]
book_type = "L2_MBP"  # Ensure data book type matches venue book type

# Data: feed the catalog's OrderBookDelta stream for this instrument into the backtest.
data_configs = [BacktestDataConfig(
        catalog_path=CATALOG_PATH,
        data_cls=OrderBookDelta,
        instrument_id=instrument.id,
        # start_time=start,  # Run across all data
        # end_time=end,  # Run across all data
    )
]

# Venue: simulated BINANCE cash account with netting OMS and starting balances.
venues_configs = [
    BacktestVenueConfig(
        name="BINANCE",
        oms_type="NETTING",
        account_type="CASH",
        base_currency=None,
        starting_balances=["20 BTC", "100000 USDT"],
        book_type=book_type,  # <-- Venue's book type
    )
]

# Strategy: the example OrderBookImbalance strategy, loaded by import path.
strategies = [
    ImportableStrategyConfig(
        strategy_path="nautilus_trader.examples.strategies.orderbook_imbalance:OrderBookImbalance",
        config_path="nautilus_trader.examples.strategies.orderbook_imbalance:OrderBookImbalanceConfig",
        config={
            "instrument_id": instrument.id,
            "book_type": book_type,
            "max_trade_size": Decimal("1.000"),
            "min_seconds_between_triggers": 1.0,
        },
    ),
]
# NautilusTrader currently exceeds the rate limit for Jupyter notebook logging (stdout output),
# this is why the `log_level` is set to "ERROR". If you lower this level to see
# more logging then the notebook will hang during cell execution. A fix is currently
# being investigated which involves either raising the configured rate limits for
# Jupyter, or throttling the log flushing from Nautilus.
# https://github.com/jupyterlab/jupyterlab/issues/12845
# https://github.com/deshaw/jupyterlab-limit-output
config = BacktestRunConfig(
    engine=BacktestEngineConfig(
        strategies=strategies,
        logging=LoggingConfig(log_level="ERROR"),
    ),
    data=data_configs,
    venues=venues_configs,
)
config

# Build the node from the single run config and execute the backtest.
node = BacktestNode(configs=[config])
result = node.run()
result
from nautilus_trader.backtest.engine import BacktestEngine
from nautilus_trader.model.identifiers import Venue

# Retrieve the run's internal engine by config ID to generate post-run reports.
engine: BacktestEngine = node.get_engine(config.id)

# Fills, positions, and the BINANCE account statement for this run.
engine.trader.generate_order_fills_report()
engine.trader.generate_positions_report()
engine.trader.generate_account_report(Venue("BINANCE"))