diff --git a/package.json b/package.json index f726c1a1..faa44b20 100644 --- a/package.json +++ b/package.json @@ -21,6 +21,7 @@ }, "devDependencies": { "@offchainlabs/prettier-config": "0.2.1", + "@types/lodash.set": "^4.3.9", "@wagmi/cli": "^1.5.2", "audit-ci": "^7.0.1", "dotenv": "^16.3.1", diff --git a/src/__snapshots__/nodeConfigBuilder.unit.test.ts.snap b/src/__snapshots__/nodeConfigBuilder.unit.test.ts.snap new file mode 100644 index 00000000..14ec7fd0 --- /dev/null +++ b/src/__snapshots__/nodeConfigBuilder.unit.test.ts.snap @@ -0,0 +1,164 @@ +// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html + +exports[`creates node config 1`] = ` +{ + "chain": { + "info-json": "[{\\"chain-id\\":123456,\\"chain-name\\":\\"my-l3-chain\\",\\"chain-config\\":{\\"homesteadBlock\\":0,\\"daoForkBlock\\":null,\\"daoForkSupport\\":true,\\"eip150Block\\":0,\\"eip150Hash\\":\\"0x0000000000000000000000000000000000000000000000000000000000000000\\",\\"eip155Block\\":0,\\"eip158Block\\":0,\\"byzantiumBlock\\":0,\\"constantinopleBlock\\":0,\\"petersburgBlock\\":0,\\"istanbulBlock\\":0,\\"muirGlacierBlock\\":0,\\"berlinBlock\\":0,\\"londonBlock\\":0,\\"clique\\":{\\"period\\":0,\\"epoch\\":0},\\"arbitrum\\":{\\"EnableArbOS\\":true,\\"AllowDebugPrecompiles\\":false,\\"DataAvailabilityCommittee\\":false,\\"InitialArbOSVersion\\":11,\\"GenesisBlockNum\\":0,\\"MaxCodeSize\\":24576,\\"MaxInitCodeSize\\":49152,\\"InitialChainOwner\\":\\"0x1000000000000000000000000000000000000000\\"},\\"chainId\\":123456},\\"parent-chain-id\\":421614,\\"parent-chain-is-arbitrum\\":true,\\"rollup\\":{\\"bridge\\":\\"0x0000000000000000000000000000000000000001\\",\\"inbox\\":\\"0x0000000000000000000000000000000000000002\\",\\"sequencer-inbox\\":\\"0x0000000000000000000000000000000000000003\\",\\"rollup\\":\\"0x0000000000000000000000000000000000000004\\",\\"validator-utils\\":\\"0x0000000000000000000000000000000000000005\\",\\"validator-wallet-creator\\":\\"0x0000000000000000000000000000000000000006\\",\\"deployed-at\\":456789}}]", + "name": "my-l3-chain", + }, + "http": { + "addr": "0.0.0.0", + "api": [ + "eth", + "net", + "web3", + "arb", + "debug", + ], + "corsdomain": [ + "*", + ], + "port": 8449, + "vhosts": [ + "*", + ], + }, + "parent-chain": { + "connection": { + "url": "https://sepolia-rollup.arbitrum.io/rpc", + }, + "id": 421614, + }, +} +`; + +exports[`creates node config with data availability service 1`] = ` +{ + "chain": { + "info-json": 
"[{\\"chain-id\\":123456,\\"chain-name\\":\\"my-l3-chain\\",\\"chain-config\\":{\\"homesteadBlock\\":0,\\"daoForkBlock\\":null,\\"daoForkSupport\\":true,\\"eip150Block\\":0,\\"eip150Hash\\":\\"0x0000000000000000000000000000000000000000000000000000000000000000\\",\\"eip155Block\\":0,\\"eip158Block\\":0,\\"byzantiumBlock\\":0,\\"constantinopleBlock\\":0,\\"petersburgBlock\\":0,\\"istanbulBlock\\":0,\\"muirGlacierBlock\\":0,\\"berlinBlock\\":0,\\"londonBlock\\":0,\\"clique\\":{\\"period\\":0,\\"epoch\\":0},\\"arbitrum\\":{\\"EnableArbOS\\":true,\\"AllowDebugPrecompiles\\":false,\\"DataAvailabilityCommittee\\":false,\\"InitialArbOSVersion\\":11,\\"GenesisBlockNum\\":0,\\"MaxCodeSize\\":24576,\\"MaxInitCodeSize\\":49152,\\"InitialChainOwner\\":\\"0x1000000000000000000000000000000000000000\\"},\\"chainId\\":123456},\\"parent-chain-id\\":421614,\\"parent-chain-is-arbitrum\\":true,\\"rollup\\":{\\"bridge\\":\\"0x0000000000000000000000000000000000000001\\",\\"inbox\\":\\"0x0000000000000000000000000000000000000002\\",\\"sequencer-inbox\\":\\"0x0000000000000000000000000000000000000003\\",\\"rollup\\":\\"0x0000000000000000000000000000000000000004\\",\\"validator-utils\\":\\"0x0000000000000000000000000000000000000005\\",\\"validator-wallet-creator\\":\\"0x0000000000000000000000000000000000000006\\",\\"deployed-at\\":456789}}]", + "name": "my-l3-chain", + }, + "http": { + "addr": "0.0.0.0", + "api": [ + "eth", + "net", + "web3", + "arb", + "debug", + ], + "corsdomain": [ + "*", + ], + "port": 8449, + "vhosts": [ + "*", + ], + }, + "node": { + "data-availability": { + "enable": true, + "rest-aggregator": { + "enable": true, + "urls": [ + "http://localhost:9877", + ], + }, + "rpc-aggregator": { + "assumed-honest": 1, + "backends": "[{\\"url\\":\\"http://localhost:9876\\",\\"pubkey\\":\\"YAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==\\",\\"signermask\\":1}]", + "enable": true, + }, + }, + }, + "parent-chain": { + "connection": { + "url": "https://sepolia-rollup.arbitrum.io/rpc", + }, + "id": 421614, + }, +} +`; + +exports[`creates node config with sequencer, batch poster and staker 1`] = ` +{ + "chain": { + "info-json": 
"[{\\"chain-id\\":123456,\\"chain-name\\":\\"my-l3-chain\\",\\"chain-config\\":{\\"homesteadBlock\\":0,\\"daoForkBlock\\":null,\\"daoForkSupport\\":true,\\"eip150Block\\":0,\\"eip150Hash\\":\\"0x0000000000000000000000000000000000000000000000000000000000000000\\",\\"eip155Block\\":0,\\"eip158Block\\":0,\\"byzantiumBlock\\":0,\\"constantinopleBlock\\":0,\\"petersburgBlock\\":0,\\"istanbulBlock\\":0,\\"muirGlacierBlock\\":0,\\"berlinBlock\\":0,\\"londonBlock\\":0,\\"clique\\":{\\"period\\":0,\\"epoch\\":0},\\"arbitrum\\":{\\"EnableArbOS\\":true,\\"AllowDebugPrecompiles\\":false,\\"DataAvailabilityCommittee\\":false,\\"InitialArbOSVersion\\":11,\\"GenesisBlockNum\\":0,\\"MaxCodeSize\\":24576,\\"MaxInitCodeSize\\":49152,\\"InitialChainOwner\\":\\"0x1000000000000000000000000000000000000000\\"},\\"chainId\\":123456},\\"parent-chain-id\\":421614,\\"parent-chain-is-arbitrum\\":true,\\"rollup\\":{\\"bridge\\":\\"0x0000000000000000000000000000000000000001\\",\\"inbox\\":\\"0x0000000000000000000000000000000000000002\\",\\"sequencer-inbox\\":\\"0x0000000000000000000000000000000000000003\\",\\"rollup\\":\\"0x0000000000000000000000000000000000000004\\",\\"validator-utils\\":\\"0x0000000000000000000000000000000000000005\\",\\"validator-wallet-creator\\":\\"0x0000000000000000000000000000000000000006\\",\\"deployed-at\\":456789}}]", + "name": "my-l3-chain", + }, + "execution": { + "caching": { + "archive": true, + }, + "forwarding-target": "", + "sequencer": { + "enable": true, + "max-block-speed": "250ms", + "max-tx-data-size": 85000, + }, + }, + "http": { + "addr": "0.0.0.0", + "api": [ + "eth", + "net", + "web3", + "arb", + "debug", + ], + "corsdomain": [ + "*", + ], + "port": 8449, + "vhosts": [ + "*", + ], + }, + "node": { + "batch-poster": { + "enable": true, + "max-size": 90000, + "parent-chain-wallet": { + "private-key": "BATCH_POSTER_PRIVATE_KEY", + }, + }, + "dangerous": { + "no-sequencer-coordinator": true, + }, + "delayed-sequencer": { + "enable": true, + "finalize-distance": 1, + "use-merge-finality": false, + }, + "sequencer": true, + "staker": { + "enable": true, + "parent-chain-wallet": { + "private-key": "STAKER_PRIVATE_KEY", + }, + "strategy": "MakeNodes", + }, + }, + "parent-chain": { + "connection": { + "url": "https://sepolia-rollup.arbitrum.io/rpc", + }, + "id": 421614, + }, +} +`; + +exports[`creates node config without defaults 1`] = ` +{ + "chain": { + "info-json": 
"[{\\"chain-id\\":123456,\\"chain-name\\":\\"my-l3-chain\\",\\"chain-config\\":{\\"homesteadBlock\\":0,\\"daoForkBlock\\":null,\\"daoForkSupport\\":true,\\"eip150Block\\":0,\\"eip150Hash\\":\\"0x0000000000000000000000000000000000000000000000000000000000000000\\",\\"eip155Block\\":0,\\"eip158Block\\":0,\\"byzantiumBlock\\":0,\\"constantinopleBlock\\":0,\\"petersburgBlock\\":0,\\"istanbulBlock\\":0,\\"muirGlacierBlock\\":0,\\"berlinBlock\\":0,\\"londonBlock\\":0,\\"clique\\":{\\"period\\":0,\\"epoch\\":0},\\"arbitrum\\":{\\"EnableArbOS\\":true,\\"AllowDebugPrecompiles\\":false,\\"DataAvailabilityCommittee\\":false,\\"InitialArbOSVersion\\":11,\\"GenesisBlockNum\\":0,\\"MaxCodeSize\\":24576,\\"MaxInitCodeSize\\":49152,\\"InitialChainOwner\\":\\"0x1000000000000000000000000000000000000000\\"},\\"chainId\\":123456},\\"parent-chain-id\\":421614,\\"parent-chain-is-arbitrum\\":true,\\"rollup\\":{\\"bridge\\":\\"0x0000000000000000000000000000000000000001\\",\\"inbox\\":\\"0x0000000000000000000000000000000000000002\\",\\"sequencer-inbox\\":\\"0x0000000000000000000000000000000000000003\\",\\"rollup\\":\\"0x0000000000000000000000000000000000000004\\",\\"validator-utils\\":\\"0x0000000000000000000000000000000000000005\\",\\"validator-wallet-creator\\":\\"0x0000000000000000000000000000000000000006\\",\\"deployed-at\\":456789}}]", + "name": "my-l3-chain", + }, + "parent-chain": { + "connection": { + "url": "https://sepolia-rollup.arbitrum.io/rpc", + }, + "id": 421614, + }, +} +`; diff --git a/src/index.ts b/src/index.ts index e53666c0..0426a6f6 100644 --- a/src/index.ts +++ b/src/index.ts @@ -68,7 +68,7 @@ import { rollupAdminLogicPublicActions } from './decorators/rollupAdminLogicPubl import { ChainConfig, ChainConfigArbitrumParams } from './types/ChainConfig'; import { CoreContracts } from './types/CoreContracts'; import { ParentChain, ParentChainId } from './types/ParentChain'; -import { NodeConfig } from './types/NodeConfig.generated'; +import { NodeConfig, NodeConfigOption } from './types/NodeConfig.generated'; import { NodeConfigChainInfoJson } from './types/NodeConfig'; import { PrepareNodeConfigParams, prepareNodeConfig } from './prepareNodeConfig'; import { @@ -164,6 +164,7 @@ export { ParentChain, ParentChainId, NodeConfig, + NodeConfigOption, NodeConfigChainInfoJson, PrepareNodeConfigParams, prepareNodeConfig, diff --git a/src/nodeConfigBuilder.ts b/src/nodeConfigBuilder.ts new file mode 100644 index 00000000..e9b35a42 --- /dev/null +++ b/src/nodeConfigBuilder.ts @@ -0,0 +1,220 @@ +import _set from 'lodash.set'; + +import { NodeConfig, NodeConfigOption } from './types/NodeConfig.generated'; +import { CoreContracts } from './types/CoreContracts'; +import { stringifyJson } from './nodeConfigBuilderUtils'; +import { parentChainIsArbitrum } from './parentChainIsArbitrum'; +import { nodeConfigBuilderDefaults } from './nodeConfigBuilderDefaults'; +import { ParentChainId } from './types/ParentChain'; +import { ChainConfig } from './types/ChainConfig'; +import { + NodeConfigChainInfoJson, + NodeConfigDataAvailabilityRpcAggregatorBackendsJson, +} from './types/NodeConfig'; + +export type NodeConfigOptionKey = NodeConfigOption['key']; + +export type NodeConfigOptionGetType = Extract< + NodeConfigOption, + { key: TKey } +>['type']; + +export type NodeConfigBuilderEnableBatchPosterParams = { + privateKey: string; +}; + +export type NodeConfigBuilderEnableStakerParams = { + privateKey: string; +}; + +export type RpcAggregatorBackendsItem = { + url: string; + pubkey: string; + signermask: number; +}; + 
+export type NodeConfigBuilderEnableDataAvailabilityServiceParams = {
+  restAggregator: {
+    urls: string[];
+  };
+  rpcAggregator: {
+    assumedHonest?: number;
+    backends: RpcAggregatorBackendsItem[];
+  };
+};
+
+export class NodeConfigBuilder {
+  /**
+   * The underlying node config object being built.
+   */
+  private nodeConfig: NodeConfig;
+  /**
+   * Whether or not the builder was initialized with the necessary data.
+   */
+  private initialized: boolean;
+
+  constructor(initialNodeConfig?: NodeConfig) {
+    // spread into a new object so the caller's config is never mutated ({ ...undefined } is just {})
+    this.nodeConfig = { ...initialNodeConfig };
+    this.initialized = false;
+  }
+
+  private prepareChainInfoJson(params: InitializeThings): NodeConfigChainInfoJson {
+    return [
+      {
+        'chain-id': params.chain.id,
+        'chain-name': params.chain.name,
+        'chain-config': params.chain.config,
+        'parent-chain-id': params.parentChain.id,
+        'parent-chain-is-arbitrum': parentChainIsArbitrum(params.parentChain.id),
+        'rollup': {
+          'bridge': params.parentChain.coreContracts.bridge,
+          'inbox': params.parentChain.coreContracts.inbox,
+          'sequencer-inbox': params.parentChain.coreContracts.sequencerInbox,
+          'rollup': params.parentChain.coreContracts.rollup,
+          'validator-utils': params.parentChain.coreContracts.validatorUtils,
+          'validator-wallet-creator': params.parentChain.coreContracts.validatorWalletCreator,
+          'deployed-at': params.parentChain.coreContracts.deployedAtBlockNumber,
+        },
+      },
+    ];
+  }
+
+  private privateSet<TKey extends NodeConfigOptionKey>(
+    key: TKey,
+    value: NodeConfigOptionGetType<TKey>,
+  ): NodeConfigBuilder {
+    _set(this.nodeConfig, key, value);
+
+    return this;
+  }
+
+  public initialize(params: InitializeThings): NodeConfigBuilder {
+    const chainInfoJson = stringifyJson(this.prepareChainInfoJson(params));
+
+    this.privateSet('chain.name', params.chain.name);
+    this.privateSet('chain.info-json', chainInfoJson);
+
+    this.privateSet('parent-chain.id', params.parentChain.id);
+    this.privateSet('parent-chain.connection.url', params.parentChain.rpcUrl);
+
+    this.initialized = true;
+
+    return this;
+  }
+
+  public enableSequencer(): NodeConfigBuilder {
+    this.set('node.sequencer', true);
+
+    this.set('node.delayed-sequencer.enable', true);
+    this.set('node.delayed-sequencer.use-merge-finality', false);
+    this.set('node.delayed-sequencer.finalize-distance', 1);
+
+    this.set('node.dangerous.no-sequencer-coordinator', true);
+
+    this.set('execution.forwarding-target', '');
+
+    this.set('execution.sequencer.enable', true);
+    this.set('execution.sequencer.max-tx-data-size', 85_000);
+    this.set('execution.sequencer.max-block-speed', '250ms');
+
+    this.set('execution.caching.archive', true);
+
+    return this;
+  }
+
+  public enableBatchPoster(params: NodeConfigBuilderEnableBatchPosterParams): NodeConfigBuilder {
+    this.set('node.batch-poster.enable', true);
+    this.set('node.batch-poster.max-size', 90_000);
+    this.set('node.batch-poster.parent-chain-wallet.private-key', params.privateKey);
+
+    return this;
+  }
+
+  public enableStaker(params: NodeConfigBuilderEnableStakerParams): NodeConfigBuilder {
+    this.set('node.staker.enable', true);
+    this.set('node.staker.strategy', 'MakeNodes');
+    this.set('node.staker.parent-chain-wallet.private-key', params.privateKey);
+
+    return this;
+  }
+
+  public enableDataAvailabilityService(
+    params: NodeConfigBuilderEnableDataAvailabilityServiceParams,
+  ): NodeConfigBuilder {
+    const backends = params.rpcAggregator.backends.map((backend, index) => ({
+      ...backend,
+      signermask: 1 << index, // 2^index
+    }));
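+    // The backends list is serialized to a JSON string to match the string-typed
+    // "node.data-availability.rpc-aggregator.backends" option.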
+    const rpcAggregatorAssumedHonest = params.rpcAggregator.assumedHonest ?? 1;
+    const rpcAggregatorBackendsJson =
+      stringifyJson<NodeConfigDataAvailabilityRpcAggregatorBackendsJson>(backends);
+
+    this.set('node.data-availability.enable', true);
+
+    this.set('node.data-availability.rest-aggregator.enable', true);
+    this.set('node.data-availability.rest-aggregator.urls', params.restAggregator.urls);
+
+    this.set('node.data-availability.rpc-aggregator.enable', true);
+    this.set('node.data-availability.rpc-aggregator.assumed-honest', rpcAggregatorAssumedHonest);
+    this.set('node.data-availability.rpc-aggregator.backends', rpcAggregatorBackendsJson);
+
+    return this;
+  }
+
+  public set<TKey extends NodeConfigOptionKey>(
+    key: TKey,
+    value: NodeConfigOptionGetType<TKey>,
+  ): NodeConfigBuilder {
+    if (!this.initialized) {
+      throw new Error(`You must first call ".initialize()" on the builder`);
+    }
+
+    this.privateSet(key, value);
+
+    return this;
+  }
+
+  build(): NodeConfig {
+    return this.nodeConfig;
+  }
+}
+
+type NodeConfigCoreContracts = Pick<
+  CoreContracts,
+  | 'bridge'
+  | 'inbox'
+  | 'sequencerInbox'
+  | 'rollup'
+  | 'validatorUtils'
+  | 'validatorWalletCreator'
+  | 'deployedAtBlockNumber'
+>;
+
+export type InitializeThings = {
+  chain: {
+    id: number;
+    name: string;
+    config: ChainConfig;
+  };
+  parentChain: {
+    id: ParentChainId;
+    rpcUrl: string;
+    coreContracts: NodeConfigCoreContracts;
+  };
+};
+
+export type CreateNodeConfigBuilderParams = {
+  withDefaults?: boolean;
+};
+
+export function createNodeConfigBuilder(params?: CreateNodeConfigBuilderParams): NodeConfigBuilder {
+  const withDefaults = params?.withDefaults ?? true;
+
+  if (!withDefaults) {
+    return new NodeConfigBuilder();
+  }
+
+  return new NodeConfigBuilder(nodeConfigBuilderDefaults);
+}
diff --git a/src/nodeConfigBuilder.unit.test.ts b/src/nodeConfigBuilder.unit.test.ts
new file mode 100644
index 00000000..7000c57c
--- /dev/null
+++ b/src/nodeConfigBuilder.unit.test.ts
@@ -0,0 +1,79 @@
+import { it, expect } from 'vitest';
+
+import { createNodeConfigBuilder, InitializeThings } from './nodeConfigBuilder';
+import { prepareChainConfig } from './prepareChainConfig';
+
+const initializeParams: InitializeThings = {
+  chain: {
+    id: 123_456,
+    name: 'my-l3-chain',
+    config: prepareChainConfig({
+      chainId: 123_456,
+      arbitrum: { InitialChainOwner: '0x1000000000000000000000000000000000000000' },
+    }),
+  },
+  parentChain: {
+    id: 421_614,
+    rpcUrl: 'https://sepolia-rollup.arbitrum.io/rpc',
+    coreContracts: {
+      bridge: '0x0000000000000000000000000000000000000001',
+      inbox: '0x0000000000000000000000000000000000000002',
+      sequencerInbox: '0x0000000000000000000000000000000000000003',
+      rollup: '0x0000000000000000000000000000000000000004',
+      validatorUtils: '0x0000000000000000000000000000000000000005',
+      validatorWalletCreator: '0x0000000000000000000000000000000000000006',
+      deployedAtBlockNumber: 456_789,
+    },
+  },
+};
+
+it('creates node config', () => {
+  const nodeConfig = createNodeConfigBuilder()
+    //
+    .initialize(initializeParams)
+    .build();
+
+  expect(nodeConfig).toMatchSnapshot();
+});
+
+it('creates node config without defaults', () => {
+  const nodeConfig = createNodeConfigBuilder({ withDefaults: false })
+    .initialize(initializeParams)
+    .build();
+
+  expect(nodeConfig).toMatchSnapshot();
+});
+
+it('creates node config with data availability service', () => {
+  const nodeConfig = createNodeConfigBuilder()
+    .initialize(initializeParams)
+    .enableDataAvailabilityService({
+      restAggregator: {
+        urls: ['http://localhost:9877'],
+      },
+      rpcAggregator: {
+        backends: [
+          {
+            url: 'http://localhost:9876',
+            
pubkey: + 'YAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==', + signermask: 1, + }, + ], + }, + }) + .build(); + + expect(nodeConfig).toMatchSnapshot(); +}); + +it('creates node config with sequencer, batch poster and staker', () => { + const nodeConfig = createNodeConfigBuilder() + .initialize(initializeParams) + .enableSequencer() + .enableBatchPoster({ privateKey: 'BATCH_POSTER_PRIVATE_KEY' }) + .enableStaker({ privateKey: 'STAKER_PRIVATE_KEY' }) + .build(); + + expect(nodeConfig).toMatchSnapshot(); +}); diff --git a/src/nodeConfigBuilderDefaults.ts b/src/nodeConfigBuilderDefaults.ts new file mode 100644 index 00000000..8aaafa1d --- /dev/null +++ b/src/nodeConfigBuilderDefaults.ts @@ -0,0 +1,11 @@ +import { NodeConfig } from './types/NodeConfig.generated'; + +export const nodeConfigBuilderDefaults: NodeConfig = { + http: { + addr: '0.0.0.0', + port: 8449, + vhosts: ['*'], + corsdomain: ['*'], + api: ['eth', 'net', 'web3', 'arb', 'debug'], + }, +}; diff --git a/src/nodeConfigBuilderUtils.ts b/src/nodeConfigBuilderUtils.ts new file mode 100644 index 00000000..3d38e8d0 --- /dev/null +++ b/src/nodeConfigBuilderUtils.ts @@ -0,0 +1,3 @@ +export function stringifyJson(json: TJson): string { + return JSON.stringify(json); +} diff --git a/src/package.json b/src/package.json index a403add8..8e948001 100644 --- a/src/package.json +++ b/src/package.json @@ -53,6 +53,7 @@ "dependencies": { "@arbitrum/sdk": "^4.0.0-alpha.4", "@arbitrum/token-bridge-contracts": "^1.2.1", - "ethers": "^5.7.2" + "ethers": "^5.7.2", + "lodash.set": "^4.3.2" } } diff --git a/src/scripts/generateNodeConfigType.ts b/src/scripts/generateNodeConfigType.ts index c89883a9..79a3c351 100644 --- a/src/scripts/generateNodeConfigType.ts +++ b/src/scripts/generateNodeConfigType.ts @@ -2,7 +2,7 @@ import { execSync } from 'child_process'; import { readFileSync, rmSync } from 'fs'; import { Project, WriterFunction, Writers } from 'ts-morph'; -const { objectType } = Writers; +const { objectType, unionType } = Writers; function getNitroNodeImageTag(): string { const defaultNitroNodeTag = 'v2.3.3-6a1c1a7'; @@ -188,7 +188,32 @@ function main() { sourceFile.addTypeAlias({ name: 'NodeConfig', type: getTypeRecursively(cliOptionsNestedObject), - docs: ['Nitro node configuration options'], + docs: ['Nitro node configuration object'], + isExported: true, + }); + // append NodeConfigOption type declaration + sourceFile.addTypeAlias({ + name: 'NodeConfigOption', + type: unionType( + // not sure why ts-morph is acting weird here + // @ts-ignore + ...cliOptions.map((option) => + objectType({ + properties: [ + { + name: 'key', + type: `"${option.name}"`, + docs: option.docs, + }, + { + name: 'type', + type: option.type, + }, + ], + }), + ), + ), + docs: ['Union type for all Nitro node configuration options'], isExported: true, }); diff --git a/src/types/NodeConfig.generated.ts b/src/types/NodeConfig.generated.ts index e3d1835b..e6b18e03 100644 --- a/src/types/NodeConfig.generated.ts +++ b/src/types/NodeConfig.generated.ts @@ -3,10 +3,10 @@ // THIS FILE IS AUTOMATICALLY GENERATED AND SHOULD NOT BE EDITED MANUALLY // // IMAGE: offchainlabs/nitro-node:v2.3.3-6a1c1a7 -// TIMESTAMP: 2024-04-18T09:36:12.267Z +// TIMESTAMP: 
2024-06-06T09:22:38.394Z // // --- -/** Nitro node configuration options */ +/** Nitro node configuration object */ export type NodeConfig = { 'auth'?: { /** AUTH-RPC server listening interface (default "127.0.0.1") */ @@ -1119,3 +1119,2335 @@ export type NodeConfig = { 'rpcprefix'?: string; }; }; +/** Union type for all Nitro node configuration options */ +export type NodeConfigOption = + | { + /** AUTH-RPC server listening interface (default "127.0.0.1") */ + key: 'auth.addr'; + type: string; + } + | { + /** APIs offered over the AUTH-RPC interface (default [validation]) */ + key: 'auth.api'; + type: string[]; + } + | { + /** Path to file holding JWT secret (32B hex) */ + key: 'auth.jwtsecret'; + type: string; + } + | { + /** Origins from which to accept AUTH requests (default [localhost]) */ + key: 'auth.origins'; + type: string[]; + } + | { + /** AUTH-RPC server listening port (default 8549) */ + key: 'auth.port'; + type: number; + } + | { + /** minimum number of blocks to execute per thread. When mode is random this acts as the size of random block range sample (default 10000) */ + key: 'blocks-reexecutor.blocks-per-thread'; + type: number; + } + | { + /** enables re-execution of a range of blocks against historic state */ + key: 'blocks-reexecutor.enable'; + type: boolean; + } + | { + /** last block number of the block range for re-execution */ + key: 'blocks-reexecutor.end-block'; + type: number; + } + | { + /** mode to run the blocks-reexecutor on. Valid modes full and random. full - execute all the blocks in the given range. random - execute a random sample range of blocks with in a given range (default "random") */ + key: 'blocks-reexecutor.mode'; + type: string; + } + | { + /** number of threads to parallelize blocks re-execution (default 12) */ + key: 'blocks-reexecutor.room'; + type: number; + } + | { + /** first block number of the block range for re-execution */ + key: 'blocks-reexecutor.start-block'; + type: number; + } + | { + /** account to use (default is first account in keystore) */ + key: 'chain.dev-wallet.account'; + type: string; + } + | { + /** if true, creates new key then exits */ + key: 'chain.dev-wallet.only-create-key'; + type: boolean; + } + | { + /** wallet passphrase (default "PASSWORD_NOT_SET") */ + key: 'chain.dev-wallet.password'; + type: string; + } + | { + /** pathname for wallet */ + key: 'chain.dev-wallet.pathname'; + type: string; + } + | { + /** private key for wallet */ + key: 'chain.dev-wallet.private-key'; + type: string; + } + | { + /** L2 chain ID (determines Arbitrum network) */ + key: 'chain.id'; + type: number; + } + | { + /** L2 chain info json files */ + key: 'chain.info-files'; + type: string[]; + } + | { + /** path to save temp downloaded file (default "/tmp/") */ + key: 'chain.info-ipfs-download-path'; + type: string; + } + | { + /** url to download chain info file */ + key: 'chain.info-ipfs-url'; + type: string; + } + | { + /** L2 chain info in json string format */ + key: 'chain.info-json'; + type: string; + } + | { + /** L2 chain name (determines Arbitrum network) */ + key: 'chain.name'; + type: string; + } + | { + /** print out currently active configuration file */ + key: 'conf.dump'; + type: boolean; + } + | { + /** environment variables with given prefix will be loaded as configuration values */ + key: 'conf.env-prefix'; + type: string; + } + | { + /** name of configuration file */ + key: 'conf.file'; + type: string[]; + } + | { + /** how often to reload configuration (0=disable periodic reloading) */ + key: 
'conf.reload-interval'; + type: string; + } + | { + /** S3 access key */ + key: 'conf.s3.access-key'; + type: string; + } + | { + /** S3 bucket */ + key: 'conf.s3.bucket'; + type: string; + } + | { + /** S3 object key */ + key: 'conf.s3.object-key'; + type: string; + } + | { + /** S3 region */ + key: 'conf.s3.region'; + type: string; + } + | { + /** S3 secret key */ + key: 'conf.s3.secret-key'; + type: string; + } + | { + /** configuration as JSON string */ + key: 'conf.string'; + type: string; + } + | { + /** retain past block state */ + key: 'execution.caching.archive'; + type: boolean; + } + | { + /** minimum age of recent blocks to keep in memory (default 30m0s) */ + key: 'execution.caching.block-age'; + type: string; + } + | { + /** minimum number of recent blocks to keep in memory (default 128) */ + key: 'execution.caching.block-count'; + type: number; + } + | { + /** amount of memory in megabytes to cache database contents with (default 2048) */ + key: 'execution.caching.database-cache'; + type: number; + } + | { + /** maximum amount of gas in blocks to skip saving state to Persistent storage (archive node only) -- warning: this option seems to cause issues */ + key: 'execution.caching.max-amount-of-gas-to-skip-state-saving'; + type: number; + } + | { + /** maximum number of blocks to skip state saving to persistent storage (archive node only) -- warning: this option seems to cause issues */ + key: 'execution.caching.max-number-of-blocks-to-skip-state-saving'; + type: number; + } + | { + /** amount of memory in megabytes to cache state snapshots with (default 400) */ + key: 'execution.caching.snapshot-cache'; + type: number; + } + | { + /** maximum gas rolled back to recover snapshot (default 300000000000) */ + key: 'execution.caching.snapshot-restore-gas-limit'; + type: number; + } + | { + /** amount of memory in megabytes to cache unchanged state trie nodes with (default 600) */ + key: 'execution.caching.trie-clean-cache'; + type: number; + } + | { + /** amount of memory in megabytes to cache state diffs against disk with (larger cache lowers database growth) (default 1024) */ + key: 'execution.caching.trie-dirty-cache'; + type: number; + } + | { + /** maximum block processing time before trie is written to hard-disk (default 1h0m0s) */ + key: 'execution.caching.trie-time-limit'; + type: string; + } + | { + /** DANGEROUS! forces a reorg to an old block height. To be used for testing only. 
-1 to disable (default -1) */ + key: 'execution.dangerous.reorg-to-block'; + type: number; + } + | { + /** enable prefetching of blocks (default true) */ + key: 'execution.enable-prefetch-block'; + type: boolean; + } + | { + /** total time to wait before cancelling connection (default 30s) */ + key: 'execution.forwarder.connection-timeout'; + type: string; + } + | { + /** time until idle connections are closed (default 15s) */ + key: 'execution.forwarder.idle-connection-timeout'; + type: string; + } + | { + /** maximum number of idle connections to keep open (default 1) */ + key: 'execution.forwarder.max-idle-connections'; + type: number; + } + | { + /** the Redis URL to recomend target via */ + key: 'execution.forwarder.redis-url'; + type: string; + } + | { + /** minimal time between update retries (default 100ms) */ + key: 'execution.forwarder.retry-interval'; + type: string; + } + | { + /** forwarding target update interval (default 1s) */ + key: 'execution.forwarder.update-interval'; + type: string; + } + | { + /** transaction forwarding target URL, or "null" to disable forwarding (iff not sequencer) */ + key: 'execution.forwarding-target'; + type: string; + } + | { + /** Dangerous! only meant to be used by system tests */ + key: 'execution.parent-chain-reader.dangerous.wait-for-tx-approval-safe-poll'; + type: string; + } + | { + /** enable reader connection (default true) */ + key: 'execution.parent-chain-reader.enable'; + type: boolean; + } + | { + /** warns if the latest l1 block is at least this old (default 5m0s) */ + key: 'execution.parent-chain-reader.old-header-timeout'; + type: string; + } + | { + /** interval when polling endpoint (default 15s) */ + key: 'execution.parent-chain-reader.poll-interval'; + type: string; + } + | { + /** do not attempt to subscribe to header events */ + key: 'execution.parent-chain-reader.poll-only'; + type: boolean; + } + | { + /** interval for subscribe error (default 5m0s) */ + key: 'execution.parent-chain-reader.subscribe-err-interval'; + type: string; + } + | { + /** timeout when waiting for a transaction (default 5m0s) */ + key: 'execution.parent-chain-reader.tx-timeout'; + type: string; + } + | { + /** use l1 data about finalized/safe blocks (default true) */ + key: 'execution.parent-chain-reader.use-finality-data'; + type: boolean; + } + | { + /** like trie-clean-cache for the separate, recording database (used for validation) (default 16) */ + key: 'execution.recording-database.trie-clean-cache'; + type: number; + } + | { + /** like trie-dirty-cache for the separate, recording database (used for validation) (default 1024) */ + key: 'execution.recording-database.trie-dirty-cache'; + type: number; + } + | { + /** list of whitelisted rpc methods */ + key: 'execution.rpc.allow-method'; + type: string[]; + } + | { + /** bounds the number of blocks arbdebug calls may return (default 256) */ + key: 'execution.rpc.arbdebug.block-range-bound'; + type: number; + } + | { + /** bounds the length of timeout queues arbdebug calls may return (default 512) */ + key: 'execution.rpc.arbdebug.timeout-queue-bound'; + type: number; + } + | { + /** number of blocks a single bloom bit section vector holds (default 16384) */ + key: 'execution.rpc.bloom-bits-blocks'; + type: number; + } + | { + /** number of confirmation blocks before a bloom section is considered final (default 256) */ + key: 'execution.rpc.bloom-confirms'; + type: number; + } + | { + /** url to redirect classic requests, use "error:[CODE:]MESSAGE" to return specified error instead of redirecting 
*/ + key: 'execution.rpc.classic-redirect'; + type: string; + } + | { + /** timeout for forwarded classic requests, where 0 = no timeout */ + key: 'execution.rpc.classic-redirect-timeout'; + type: string; + } + | { + /** timeout used for eth_call (0=infinite) (default 5s) */ + key: 'execution.rpc.evm-timeout'; + type: string; + } + | { + /** max number of blocks a fee history request may cover (default 1024) */ + key: 'execution.rpc.feehistory-max-block-count'; + type: number; + } + | { + /** log filter system maximum number of cached blocks (default 32) */ + key: 'execution.rpc.filter-log-cache-size'; + type: number; + } + | { + /** log filter system maximum time filters stay active (default 5m0s) */ + key: 'execution.rpc.filter-timeout'; + type: string; + } + | { + /** cap on computation gas that can be used in eth_call/estimateGas (0=infinite) (default 50000000) */ + key: 'execution.rpc.gas-cap'; + type: number; + } + | { + /** maximum depth for recreating state, measured in l2 gas (0=don't recreate state, -1=infinite, -2=use default value for archive or non-archive node (whichever is configured)) (default -2) */ + key: 'execution.rpc.max-recreate-state-depth'; + type: number; + } + | { + /** allow transactions that aren't EIP-155 replay protected to be submitted over the RPC (default true) */ + key: 'execution.rpc.tx-allow-unprotected'; + type: boolean; + } + | { + /** cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap) (default 1) */ + key: 'execution.rpc.tx-fee-cap'; + type: number; + } + | { + /** secondary transaction forwarding target URL */ + key: 'execution.secondary-forwarding-target'; + type: string[]; + } + | { + /** act and post to l1 as sequencer */ + key: 'execution.sequencer.enable'; + type: boolean; + } + | { + /** total time to wait before cancelling connection (default 30s) */ + key: 'execution.sequencer.forwarder.connection-timeout'; + type: string; + } + | { + /** time until idle connections are closed (default 1m0s) */ + key: 'execution.sequencer.forwarder.idle-connection-timeout'; + type: string; + } + | { + /** maximum number of idle connections to keep open (default 100) */ + key: 'execution.sequencer.forwarder.max-idle-connections'; + type: number; + } + | { + /** the Redis URL to recomend target via */ + key: 'execution.sequencer.forwarder.redis-url'; + type: string; + } + | { + /** minimal time between update retries (default 100ms) */ + key: 'execution.sequencer.forwarder.retry-interval'; + type: string; + } + | { + /** forwarding target update interval (default 1s) */ + key: 'execution.sequencer.forwarder.update-interval'; + type: string; + } + | { + /** maximum acceptable time difference between the local time and the latest L1 block's timestamp (default 1h0m0s) */ + key: 'execution.sequencer.max-acceptable-timestamp-delta'; + type: string; + } + | { + /** minimum delay between blocks (sets a maximum speed of block production) (default 250ms) */ + key: 'execution.sequencer.max-block-speed'; + type: string; + } + | { + /** maximum gas executed in a revert for the sequencer to reject the transaction instead of posting it (anti-DOS) (default 31000) */ + key: 'execution.sequencer.max-revert-gas-reject'; + type: number; + } + | { + /** maximum transaction size the sequencer will accept (default 95000) */ + key: 'execution.sequencer.max-tx-data-size'; + type: number; + } + | { + /** size of the tx sender nonce cache (default 1024) */ + key: 'execution.sequencer.nonce-cache-size'; + type: number; + } + | { + /** maximum amount of time 
to wait for a predecessor before rejecting a tx with nonce too high (default 1s) */ + key: 'execution.sequencer.nonce-failure-cache-expiry'; + type: string; + } + | { + /** number of transactions with too high of a nonce to keep in memory while waiting for their predecessor (default 1024) */ + key: 'execution.sequencer.nonce-failure-cache-size'; + type: number; + } + | { + /** size of the pending tx queue (default 1024) */ + key: 'execution.sequencer.queue-size'; + type: number; + } + | { + /** maximum amount of time transaction can wait in queue (default 12s) */ + key: 'execution.sequencer.queue-timeout'; + type: string; + } + | { + /** comma separated whitelist of authorized senders (if empty, everyone is allowed) */ + key: 'execution.sequencer.sender-whitelist'; + type: string; + } + | { + /** retain the ability to lookup transactions by hash for the past N blocks (0 = all blocks) (default 126230400) */ + key: 'execution.tx-lookup-limit'; + type: number; + } + | { + /** how long ago should the storage conditions from eth_SendRawTransactionConditional be true, 0 = don't check old state (default 2) */ + key: 'execution.tx-pre-checker.required-state-age'; + type: number; + } + | { + /** maximum number of blocks to look back while looking for the seconds old state, 0 = don't limit the search (default 4) */ + key: 'execution.tx-pre-checker.required-state-max-blocks'; + type: number; + } + | { + /** how strict to be when checking txs before forwarding them. 0 = accept anything, 10 = should never reject anything that'd succeed, 20 = likely won't reject anything that'd succeed, 30 = full validation which may reject txs that would succeed */ + key: 'execution.tx-pre-checker.strictness'; + type: number; + } + | { + /** size of intermediate log records buffer (default 512) */ + key: 'file-logging.buf-size'; + type: number; + } + | { + /** enable compression of old log files (default true) */ + key: 'file-logging.compress'; + type: boolean; + } + | { + /** enable logging to file (default true) */ + key: 'file-logging.enable'; + type: boolean; + } + | { + /** path to log file (default "nitro.log") */ + key: 'file-logging.file'; + type: string; + } + | { + /** if true: local time will be used in old log filename timestamps */ + key: 'file-logging.local-time'; + type: boolean; + } + | { + /** maximum number of days to retain old log files based on the timestamp encoded in their filename (0 = no limit) */ + key: 'file-logging.max-age'; + type: number; + } + | { + /** maximum number of old log files to retain (0 = no limit) (default 20) */ + key: 'file-logging.max-backups'; + type: number; + } + | { + /** log file size in Mb that will trigger log file rotation (0 = trigger disabled) (default 5) */ + key: 'file-logging.max-size'; + type: number; + } + | { + /** Comma separated list of domains from which to accept cross origin requests (browser enforced) */ + key: 'graphql.corsdomain'; + type: string[]; + } + | { + /** Enable graphql endpoint on the rpc endpoint */ + key: 'graphql.enable'; + type: boolean; + } + | { + /** Comma separated list of virtual hostnames from which to accept requests (server enforced). 
Accepts '*' wildcard (default [localhost]) */ + key: 'graphql.vhosts'; + type: string[]; + } + | { + /** HTTP-RPC server listening interface */ + key: 'http.addr'; + type: string; + } + | { + /** APIs offered over the HTTP-RPC interface (default [net,web3,eth,arb]) */ + key: 'http.api'; + type: string[]; + } + | { + /** Comma separated list of domains from which to accept cross origin requests (browser enforced) */ + key: 'http.corsdomain'; + type: string[]; + } + | { + /** HTTP-RPC server listening port (default 8547) */ + key: 'http.port'; + type: number; + } + | { + /** HTTP path path prefix on which JSON-RPC is served. Use '/' to serve on all paths */ + key: 'http.rpcprefix'; + type: string; + } + | { + /** the maximum amount of time to wait for the next request when keep-alives are enabled (http.Server.IdleTimeout) (default 2m0s) */ + key: 'http.server-timeouts.idle-timeout'; + type: string; + } + | { + /** the amount of time allowed to read the request headers (http.Server.ReadHeaderTimeout) (default 30s) */ + key: 'http.server-timeouts.read-header-timeout'; + type: string; + } + | { + /** the maximum duration for reading the entire request (http.Server.ReadTimeout) (default 30s) */ + key: 'http.server-timeouts.read-timeout'; + type: string; + } + | { + /** the maximum duration before timing out writes of the response (http.Server.WriteTimeout) (default 30s) */ + key: 'http.server-timeouts.write-timeout'; + type: string; + } + | { + /** Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard (default [localhost]) */ + key: 'http.vhosts'; + type: string[]; + } + | { + /** during init - sync database every X accounts. Lower value for low-memory systems. 0 disables. (default 100000) */ + key: 'init.accounts-per-sync'; + type: number; + } + | { + /** init with dev data (1 account with balance) instead of file import */ + key: 'init.dev-init'; + type: boolean; + } + | { + /** Address of dev-account. Leave empty to use the dev-wallet. */ + key: 'init.dev-init-address'; + type: string; + } + | { + /** Number of preinit blocks. Must exist in ancient database. */ + key: 'init.dev-init-blocknum'; + type: number; + } + | { + /** path to save temp downloaded file (default "/tmp/") */ + key: 'init.download-path'; + type: string; + } + | { + /** how long to wait between polling attempts (default 1m0s) */ + key: 'init.download-poll'; + type: string; + } + | { + /** init with empty state */ + key: 'init.empty'; + type: boolean; + } + | { + /** if true: in case database exists init code will be reexecuted and genesis block compared to database */ + key: 'init.force'; + type: boolean; + } + | { + /** path for json data to import */ + key: 'init.import-file'; + type: string; + } + | { + /** pruning for a given use: "full" for full nodes serving RPC requests, or "validator" for validators */ + key: 'init.prune'; + type: string; + } + | { + /** the amount of memory in megabytes to use for the pruning bloom filter (higher values prune better) (default 2048) */ + key: 'init.prune-bloom-size'; + type: number; + } + | { + /** block number to start recreating missing states from (0 = disabled) */ + key: 'init.recreate-missing-state-from'; + type: number; + } + | { + /** forces a reset to an old message height. 
Also set max-reorg-resequence-depth=0 to force re-reading messages (default -1) */ + key: 'init.reset-to-message'; + type: number; + } + | { + /** quit after init is done */ + key: 'init.then-quit'; + type: boolean; + } + | { + /** url to download initializtion data - will poll if download fails */ + key: 'init.url'; + type: string; + } + | { + /** Requested location to place the IPC endpoint. An empty path disables IPC. */ + key: 'ipc.path'; + type: string; + } + | { + /** log level (default 3) */ + key: 'log-level'; + type: number; + } + | { + /** log type (plaintext or json) (default "plaintext") */ + key: 'log-type'; + type: string; + } + | { + /** enable metrics */ + key: 'metrics'; + type: boolean; + } + | { + /** metrics server address (default "127.0.0.1") */ + key: 'metrics-server.addr'; + type: string; + } + | { + /** metrics server port (default 6070) */ + key: 'metrics-server.port'; + type: number; + } + | { + /** metrics server update interval (default 3s) */ + key: 'metrics-server.update-interval'; + type: string; + } + | { + /** batch compression level (default 11) */ + key: 'node.batch-poster.compression-level'; + type: number; + } + | { + /** In AnyTrust mode, the period which DASes are requested to retain the stored batches. (default 360h0m0s) */ + key: 'node.batch-poster.das-retention-period'; + type: string; + } + | { + /** if true, don't put transactions in the mempool that spend a total greater than the batch poster's balance (default true) */ + key: 'node.batch-poster.data-poster.allocate-mempool-balance'; + type: boolean; + } + | { + /** comma-separated list of durations since first posting a blob transaction to attempt a replace-by-fee (default "5m,10m,30m,1h,4h,8h,16h,22h") */ + key: 'node.batch-poster.data-poster.blob-tx-replacement-times'; + type: string; + } + | { + /** clear database storage */ + key: 'node.batch-poster.data-poster.dangerous.clear-dbstorage'; + type: boolean; + } + | { + /** unit to measure the time elapsed since creation of transaction used for maximum fee cap calculation (default 10m0s) */ + key: 'node.batch-poster.data-poster.elapsed-time-base'; + type: string; + } + | { + /** weight given to the units of time elapsed used for maximum fee cap calculation (default 10) */ + key: 'node.batch-poster.data-poster.elapsed-time-importance'; + type: number; + } + | { + /** external signer address */ + key: 'node.batch-poster.data-poster.external-signer.address'; + type: string; + } + | { + /** rpc client cert */ + key: 'node.batch-poster.data-poster.external-signer.client-cert'; + type: string; + } + | { + /** rpc client private key */ + key: 'node.batch-poster.data-poster.external-signer.client-private-key'; + type: string; + } + | { + /** external signer method (default "eth_signTransaction") */ + key: 'node.batch-poster.data-poster.external-signer.method'; + type: string; + } + | { + /** external signer root CA */ + key: 'node.batch-poster.data-poster.external-signer.root-ca'; + type: string; + } + | { + /** external signer url */ + key: 'node.batch-poster.data-poster.external-signer.url'; + type: string; + } + | { + /** encodes items in a legacy way (as it was before dropping generics) */ + key: 'node.batch-poster.data-poster.legacy-storage-encoding'; + type: boolean; + } + | { + /** the maximum tip cap to post EIP-4844 blob carrying transactions at (default 1) */ + key: 'node.batch-poster.data-poster.max-blob-tx-tip-cap-gwei'; + type: number; + } + | { + /** the maximum multiple of the current price to bid for a transaction's fees (may be 
exceeded due to min rbf increase, 0 = unlimited) (default 100000) */ + key: 'node.batch-poster.data-poster.max-fee-bid-multiple-bips'; + type: number; + } + | { + /** mathematical formula to calculate maximum fee cap gwei the result of which would be float64. This expression is expected to be evaluated please refer https://github.com/Knetic/govaluate/blob/master/MANUAL.md to find all available mathematical operators. Currently available variables to construct the formula are BacklogOfBatches, UrgencyGWei, ElapsedTime, ElapsedTimeBase, ElapsedTimeImportance, and TargetPriceGWei (default "((BacklogOfBatches * UrgencyGWei) ** 2) + ((ElapsedTime/ElapsedTimeBase) ** 2) * ElapsedTimeImportance + TargetPriceGWei") */ + key: 'node.batch-poster.data-poster.max-fee-cap-formula'; + type: string; + } + | { + /** the maximum number of transactions to have queued in the mempool at once (0 = unlimited) (default 18) */ + key: 'node.batch-poster.data-poster.max-mempool-transactions'; + type: number; + } + | { + /** the maximum number of weight (weight = min(1, tx.blobs)) to have queued in the mempool at once (0 = unlimited) (default 18) */ + key: 'node.batch-poster.data-poster.max-mempool-weight'; + type: number; + } + | { + /** the maximum number of unconfirmed transactions to track at once (0 = unlimited) */ + key: 'node.batch-poster.data-poster.max-queued-transactions'; + type: number; + } + | { + /** the maximum tip cap to post transactions at (default 5) */ + key: 'node.batch-poster.data-poster.max-tip-cap-gwei'; + type: number; + } + | { + /** the minimum tip cap to post EIP-4844 blob carrying transactions at (default 1) */ + key: 'node.batch-poster.data-poster.min-blob-tx-tip-cap-gwei'; + type: number; + } + | { + /** the minimum tip cap to post transactions at (default 0.05) */ + key: 'node.batch-poster.data-poster.min-tip-cap-gwei'; + type: number; + } + | { + /** the maximum probable reorg depth, used to determine when a transaction will no longer likely need replaced-by-fee (default 1) */ + key: 'node.batch-poster.data-poster.nonce-rbf-soft-confs'; + type: number; + } + | { + /** disable message signature verification */ + key: 'node.batch-poster.data-poster.redis-signer.dangerous.disable-signature-verification'; + type: boolean; + } + | { + /** a fallback key used for message verification */ + key: 'node.batch-poster.data-poster.redis-signer.fallback-verification-key'; + type: string; + } + | { + /** a 32-byte (64-character) hex string used to sign messages, or a path to a file containing it */ + key: 'node.batch-poster.data-poster.redis-signer.signing-key'; + type: string; + } + | { + /** comma-separated list of durations since first posting to attempt a replace-by-fee (default "5m,10m,20m,30m,1h,2h,4h,6h,8h,12h,16h,18h,20h,22h") */ + key: 'node.batch-poster.data-poster.replacement-times'; + type: string; + } + | { + /** the target price to use for maximum fee cap calculation (default 60) */ + key: 'node.batch-poster.data-poster.target-price-gwei'; + type: number; + } + | { + /** the urgency to use for maximum fee cap calculation (default 2) */ + key: 'node.batch-poster.data-poster.urgency-gwei'; + type: number; + } + | { + /** uses database storage when enabled (default true) */ + key: 'node.batch-poster.data-poster.use-db-storage'; + type: boolean; + } + | { + /** uses noop storage, it doesn't store anything */ + key: 'node.batch-poster.data-poster.use-noop-storage'; + type: boolean; + } + | { + /** only treat a transaction as confirmed after L1 finality has been achieved (recommended) 
(default true) */ + key: 'node.batch-poster.data-poster.wait-for-l1-finality'; + type: boolean; + } + | { + /** If unable to batch to DAS, disable fallback storing data on chain */ + key: 'node.batch-poster.disable-das-fallback-store-data-on-chain'; + type: boolean; + } + | { + /** enable posting batches to l1 */ + key: 'node.batch-poster.enable'; + type: boolean; + } + | { + /** how long to delay after error posting batch (default 10s) */ + key: 'node.batch-poster.error-delay'; + type: string; + } + | { + /** use this much more gas than estimation says is necessary to post batches (default 50000) */ + key: 'node.batch-poster.extra-batch-gas'; + type: number; + } + | { + /** for gas estimation, use this multiple of the basefee (measured in basis points) as the max fee per gas (default 15000) */ + key: 'node.batch-poster.gas-estimate-base-fee-multiple-bips'; + type: number; + } + | { + /** The gas refunder contract address (optional) */ + key: 'node.batch-poster.gas-refunder-address'; + type: string; + } + | { + /** if the parent chain supports 4844 blobs and ignore-blob-price is true, post 4844 blobs even if it's not price efficient */ + key: 'node.batch-poster.ignore-blob-price'; + type: boolean; + } + | { + /** only post messages to batches when they're within the max future block/timestamp as of this L1 block tag ("safe", "finalized", "latest", or "ignore" to ignore this check) */ + key: 'node.batch-poster.l1-block-bound'; + type: string; + } + | { + /** post batches even if not within the layer 1 future bounds if we're within this margin of the max delay (default 1h0m0s) */ + key: 'node.batch-poster.l1-block-bound-bypass'; + type: string; + } + | { + /** maximum 4844 blob enabled batch size (default 779288) */ + key: 'node.batch-poster.max-4844-batch-size'; + type: number; + } + | { + /** maximum batch posting delay (default 1h0m0s) */ + key: 'node.batch-poster.max-delay'; + type: string; + } + | { + /** maximum batch size (default 100000) */ + key: 'node.batch-poster.max-size'; + type: number; + } + | { + /** account to use (default is first account in keystore) */ + key: 'node.batch-poster.parent-chain-wallet.account'; + type: string; + } + | { + /** if true, creates new key then exits */ + key: 'node.batch-poster.parent-chain-wallet.only-create-key'; + type: boolean; + } + | { + /** wallet passphrase (default "PASSWORD_NOT_SET") */ + key: 'node.batch-poster.parent-chain-wallet.password'; + type: string; + } + | { + /** pathname for wallet (default "batch-poster-wallet") */ + key: 'node.batch-poster.parent-chain-wallet.pathname'; + type: string; + } + | { + /** private key for wallet */ + key: 'node.batch-poster.parent-chain-wallet.private-key'; + type: string; + } + | { + /** how long to wait after no batches are ready to be posted before checking again (default 10s) */ + key: 'node.batch-poster.poll-interval'; + type: string; + } + | { + /** if the parent chain supports 4844 blobs and they're well priced, post EIP-4844 blobs */ + key: 'node.batch-poster.post-4844-blobs'; + type: boolean; + } + | { + /** should node always try grabing lock in background */ + key: 'node.batch-poster.redis-lock.background-lock'; + type: boolean; + } + | { + /** if false, always treat this as locked and don't write the lock to redis (default true) */ + key: 'node.batch-poster.redis-lock.enable'; + type: boolean; + } + | { + /** key for lock */ + key: 'node.batch-poster.redis-lock.key'; + type: string; + } + | { + /** how long lock is held (default 1m0s) */ + key: 
'node.batch-poster.redis-lock.lockout-duration'; + type: string; + } + | { + /** this node's id prefix when acquiring the lock (optional) */ + key: 'node.batch-poster.redis-lock.my-id'; + type: string; + } + | { + /** how long between consecutive calls to redis (default 10s) */ + key: 'node.batch-poster.redis-lock.refresh-duration'; + type: string; + } + | { + /** if non-empty, the Redis URL to store queued transactions in */ + key: 'node.batch-poster.redis-url'; + type: string; + } + | { + /** post batches with access lists to reduce gas usage (disabled for L3s) (default true) */ + key: 'node.batch-poster.use-access-lists'; + type: boolean; + } + | { + /** wait for the max batch delay, even if the batch is full */ + key: 'node.batch-poster.wait-for-max-delay'; + type: boolean; + } + | { + /** current wasm module root ('current' read from chain, 'latest' from machines/latest dir, or provide hash) (default "current") */ + key: 'node.block-validator.current-module-root'; + type: string; + } + | { + /** resets block-by-block validation, starting again at genesis */ + key: 'node.block-validator.dangerous.reset-block-validation'; + type: boolean; + } + | { + /** enable block-by-block validation */ + key: 'node.block-validator.enable'; + type: boolean; + } + | { + /** failing a validation is treated as a fatal error (default true) */ + key: 'node.block-validator.failure-is-fatal'; + type: boolean; + } + | { + /** prepare entries for up to that many blocks ahead of validation (small footprint) (default 1024) */ + key: 'node.block-validator.forward-blocks'; + type: number; + } + | { + /** minimum free-memory limit after reaching which the blockvalidator pauses validation. Enabled by default as 1GB, to disable provide empty string (default "default") */ + key: 'node.block-validator.memory-free-limit'; + type: string; + } + | { + /** pending upgrade wasm module root to additionally validate (hash, 'latest' or empty) (default "latest") */ + key: 'node.block-validator.pending-upgrade-module-root'; + type: string; + } + | { + /** record that many blocks ahead of validation (larger footprint) (default 24) */ + key: 'node.block-validator.prerecorded-blocks'; + type: number; + } + | { + /** poll time to check validations (default 1s) */ + key: 'node.block-validator.validation-poll'; + type: string; + } + | { + /** array of validation rpc configs given as a json string. 
time duration should be supplied in number indicating nanoseconds (default "default") */ + key: 'node.block-validator.validation-server-configs-list'; + type: string; + } + | { + /** limit size of arguments in log entries (default 2048) */ + key: 'node.block-validator.validation-server.arg-log-limit'; + type: number; + } + | { + /** how long to wait for initial connection */ + key: 'node.block-validator.validation-server.connection-wait'; + type: string; + } + | { + /** path to file with jwtsecret for validation - ignored if url is self or self-auth */ + key: 'node.block-validator.validation-server.jwtsecret'; + type: string; + } + | { + /** number of retries in case of failure(0 mean one attempt) (default 3) */ + key: 'node.block-validator.validation-server.retries'; + type: number; + } + | { + /** delay between retries */ + key: 'node.block-validator.validation-server.retry-delay'; + type: string; + } + | { + /** Errors matching this regular expression are automatically retried (default "websocket: close.*|dial tcp .*|.*i/o timeout|.*connection reset by peer|.*connection refused") */ + key: 'node.block-validator.validation-server.retry-errors'; + type: string; + } + | { + /** per-response timeout (0-disabled) */ + key: 'node.block-validator.validation-server.timeout'; + type: string; + } + | { + /** url of server, use self for loopback websocket, self-auth for loopback with authentication (default "self-auth") */ + key: 'node.block-validator.validation-server.url'; + type: string; + } + | { + /** DANGEROUS! disables the EIP-4844 blob reader, which is necessary to read batches */ + key: 'node.dangerous.disable-blob-reader'; + type: boolean; + } + | { + /** DANGEROUS! disables listening to L1. To be used in test nodes only */ + key: 'node.dangerous.no-l1-listener'; + type: boolean; + } + | { + /** DANGEROUS! allows sequencing without sequencer-coordinator */ + key: 'node.dangerous.no-sequencer-coordinator'; + type: boolean; + } + | { + /** enable Anytrust Data Availability mode */ + key: 'node.data-availability.enable'; + type: boolean; + } + | { + /** enable storage/retrieval of sequencer batch data from IPFS */ + key: 'node.data-availability.ipfs-storage.enable'; + type: boolean; + } + | { + /** list of IPFS peers to connect to, eg /ip4/1.2.3.4/tcp/12345/p2p/abc...xyz */ + key: 'node.data-availability.ipfs-storage.peers'; + type: string[]; + } + | { + /** pin sequencer batch data in IPFS (default true) */ + key: 'node.data-availability.ipfs-storage.pin-after-get'; + type: boolean; + } + | { + /** percent of sequencer batch data to pin, as a floating point number in the range 0.0 to 100.0 (default 100) */ + key: 'node.data-availability.ipfs-storage.pin-percentage'; + type: number; + } + | { + /** comma separated list of IPFS profiles to use, see https://docs.ipfs.tech/how-to/default-profile */ + key: 'node.data-availability.ipfs-storage.profiles'; + type: string; + } + | { + /** timeout for IPFS reads, since by default it will wait forever. 
Treat timeout as not found (default 1m0s) */ + key: 'node.data-availability.ipfs-storage.read-timeout'; + type: string; + } + | { + /** directory to use to store the local IPFS repo */ + key: 'node.data-availability.ipfs-storage.repo-dir'; + type: string; + } + | { + /** whether the Data Availability Service should fail immediately on errors (not recommended) */ + key: 'node.data-availability.panic-on-error'; + type: boolean; + } + | { + /** parent chain RPC connection attempts (spaced out at least 1 second per attempt, 0 to retry infinitely), only used in standalone daserver; when running as part of a node that node's parent chain configuration is used (default 15) */ + key: 'node.data-availability.parent-chain-connection-attempts'; + type: number; + } + | { + /** URL for parent chain node, only used in standalone daserver; when running as part of a node that node's L1 configuration is used */ + key: 'node.data-availability.parent-chain-node-url'; + type: string; + } + | { + /** Data Availability Service timeout duration for Store requests (default 5s) */ + key: 'node.data-availability.request-timeout'; + type: string; + } + | { + /** enable retrieval of sequencer batch data from a list of remote REST endpoints; if other DAS storage types are enabled, this mode is used as a fallback */ + key: 'node.data-availability.rest-aggregator.enable'; + type: boolean; + } + | { + /** number of stats entries (latency and success rate) to keep for each REST endpoint; controls whether strategy is faster or slower to respond to changing conditions (default 20) */ + key: 'node.data-availability.rest-aggregator.max-per-endpoint-stats'; + type: number; + } + | { + /** a URL to a list of URLs of REST das endpoints that is checked at startup; additive with the url option */ + key: 'node.data-availability.rest-aggregator.online-url-list'; + type: string; + } + | { + /** time interval to periodically fetch url list from online-url-list (default 1h0m0s) */ + key: 'node.data-availability.rest-aggregator.online-url-list-fetch-interval'; + type: string; + } + | { + /** number of consecutive GetByHash calls to the aggregator where each call will cause it to select from REST endpoints in order of best latency and success rate, before switching to explore mode (default 1000) */ + key: 'node.data-availability.rest-aggregator.simple-explore-exploit-strategy.exploit-iterations'; + type: number; + } + | { + /** number of consecutive GetByHash calls to the aggregator where each call will cause it to randomly select from REST endpoints until one returns successfully, before switching to exploit mode (default 20) */ + key: 'node.data-availability.rest-aggregator.simple-explore-exploit-strategy.explore-iterations'; + type: number; + } + | { + /** strategy to use to determine order and parallelism of calling REST endpoint URLs; valid options are 'simple-explore-exploit' (default "simple-explore-exploit") */ + key: 'node.data-availability.rest-aggregator.strategy'; + type: string; + } + | { + /** how frequently to update the strategy with endpoint latency and error rate data (default 10s) */ + key: 'node.data-availability.rest-aggregator.strategy-update-interval'; + type: string; + } + | { + /** check if the data already exists in this DAS's storage. 
Must be disabled for fast sync with an IPFS backend (default true) */ + key: 'node.data-availability.rest-aggregator.sync-to-storage.check-already-exists'; + type: boolean; + } + | { + /** time to wait if encountered an error before retrying (default 1s) */ + key: 'node.data-availability.rest-aggregator.sync-to-storage.delay-on-error'; + type: string; + } + | { + /** eagerly sync batch data to this DAS's storage from the rest endpoints, using L1 as the index of batch data hashes; otherwise only sync lazily */ + key: 'node.data-availability.rest-aggregator.sync-to-storage.eager'; + type: boolean; + } + | { + /** when eagerly syncing, start indexing forward from this L1 block. Only used if there is no sync state */ + key: 'node.data-availability.rest-aggregator.sync-to-storage.eager-lower-bound-block'; + type: number; + } + | { + /** log only on failures to write when syncing; otherwise treat it as an error (default true) */ + key: 'node.data-availability.rest-aggregator.sync-to-storage.ignore-write-errors'; + type: boolean; + } + | { + /** when eagerly syncing, max l1 blocks to read per poll (default 100) */ + key: 'node.data-availability.rest-aggregator.sync-to-storage.parent-chain-blocks-per-read'; + type: number; + } + | { + /** period to retain synced data (defaults to forever) (default 2562047h47m16.854775807s) */ + key: 'node.data-availability.rest-aggregator.sync-to-storage.retention-period'; + type: string; + } + | { + /** directory to store the sync state in, ie the block number currently synced up to, so that we don't sync from scratch each time */ + key: 'node.data-availability.rest-aggregator.sync-to-storage.state-dir'; + type: string; + } + | { + /** list of URLs including 'http://' or 'https://' prefixes and port numbers to REST DAS endpoints; additive with the online-url-list option */ + key: 'node.data-availability.rest-aggregator.urls'; + type: string[]; + } + | { + /** time to wait until trying the next set of REST endpoints while waiting for a response; the next set of REST endpoints is determined by the strategy selected (default 2s) */ + key: 'node.data-availability.rest-aggregator.wait-before-try-next'; + type: string; + } + | { + /** Number of assumed honest backends (H). If there are N backends, K=N+1-H valid responses are required to consider an Store request to be successful. 
*/ + key: 'node.data-availability.rpc-aggregator.assumed-honest'; + type: number; + } + | { + /** JSON RPC backend configuration */ + key: 'node.data-availability.rpc-aggregator.backends'; + type: string; + } + | { + /** enable storage/retrieval of sequencer batch data from a list of RPC endpoints; this should only be used by the batch poster and not in combination with other DAS storage types */ + key: 'node.data-availability.rpc-aggregator.enable'; + type: boolean; + } + | { + /** parent chain address of SequencerInbox contract */ + key: 'node.data-availability.sequencer-inbox-address'; + type: string; + } + | { + /** enable delayed sequencer */ + key: 'node.delayed-sequencer.enable'; + type: boolean; + } + | { + /** how many blocks in the past L1 block is considered final (ignored when using Merge finality) (default 20) */ + key: 'node.delayed-sequencer.finalize-distance'; + type: number; + } + | { + /** whether to wait for full finality before sequencing delayed messages */ + key: 'node.delayed-sequencer.require-full-finality'; + type: boolean; + } + | { + /** whether to use The Merge's notion of finality before sequencing delayed messages (default true) */ + key: 'node.delayed-sequencer.use-merge-finality'; + type: boolean; + } + | { + /** enable per message deflate compression support (default true) */ + key: 'node.feed.input.enable-compression'; + type: boolean; + } + | { + /** initial duration to wait before reconnect (default 1s) */ + key: 'node.feed.input.reconnect-initial-backoff'; + type: string; + } + | { + /** maximum duration to wait before reconnect (default 1m4s) */ + key: 'node.feed.input.reconnect-maximum-backoff'; + type: string; + } + | { + /** require chain id to be present on connect */ + key: 'node.feed.input.require-chain-id'; + type: boolean; + } + | { + /** require feed version to be present on connect */ + key: 'node.feed.input.require-feed-version'; + type: boolean; + } + | { + /** list of secondary URLs of sequencer feed source. 
Would be started in the order they appear in the list when primary feeds fails */ + key: 'node.feed.input.secondary-url'; + type: string[]; + } + | { + /** duration to wait before timing out connection to sequencer feed (default 20s) */ + key: 'node.feed.input.timeout'; + type: string; + } + | { + /** list of primary URLs of sequencer feed source */ + key: 'node.feed.input.url'; + type: string[]; + } + | { + /** accept verified message from sequencer (default true) */ + key: 'node.feed.input.verify.accept-sequencer'; + type: boolean; + } + | { + /** a list of allowed addresses */ + key: 'node.feed.input.verify.allowed-addresses'; + type: string[]; + } + | { + /** accept empty as valid signature (default true) */ + key: 'node.feed.input.verify.dangerous.accept-missing'; + type: boolean; + } + | { + /** address to bind the relay feed output to */ + key: 'node.feed.output.addr'; + type: string; + } + | { + /** the maximum number of messages each segment within the backlog can contain (default 240) */ + key: 'node.feed.output.backlog.segment-limit'; + type: number; + } + | { + /** delay the first messages sent to each client by this amount */ + key: 'node.feed.output.client-delay'; + type: string; + } + | { + /** duration to wait before timing out connections to client (default 15s) */ + key: 'node.feed.output.client-timeout'; + type: string; + } + | { + /** enable broadcaster per-client connection limiting */ + key: 'node.feed.output.connection-limits.enable'; + type: boolean; + } + | { + /** limit clients, as identified by IPv4/v6 address, to this many connections to this relay (default 5) */ + key: 'node.feed.output.connection-limits.per-ip-limit'; + type: number; + } + | { + /** limit ipv6 clients, as identified by IPv6 address masked with /48, to this many connections to this relay (default 20) */ + key: 'node.feed.output.connection-limits.per-ipv6-cidr-48-limit'; + type: number; + } + | { + /** limit ipv6 clients, as identified by IPv6 address masked with /64, to this many connections to this relay (default 10) */ + key: 'node.feed.output.connection-limits.per-ipv6-cidr-64-limit'; + type: number; + } + | { + /** time to wait after a relay client disconnects before the disconnect is registered with respect to the limit for this client */ + key: 'node.feed.output.connection-limits.reconnect-cooldown-period'; + type: string; + } + | { + /** don't sign feed messages (default true) */ + key: 'node.feed.output.disable-signing'; + type: boolean; + } + | { + /** enable broadcaster */ + key: 'node.feed.output.enable'; + type: boolean; + } + | { + /** enable per message deflate compression support */ + key: 'node.feed.output.enable-compression'; + type: boolean; + } + | { + /** duration to wait before timing out HTTP to WS upgrade (default 1s) */ + key: 'node.feed.output.handshake-timeout'; + type: string; + } + | { + /** only supply catchup buffer if requested sequence number is reasonable */ + key: 'node.feed.output.limit-catchup'; + type: boolean; + } + | { + /** log every client connect */ + key: 'node.feed.output.log-connect'; + type: boolean; + } + | { + /** log every client disconnect */ + key: 'node.feed.output.log-disconnect'; + type: boolean; + } + | { + /** the maximum size of the catchup buffer (-1 means unlimited) (default -1) */ + key: 'node.feed.output.max-catchup'; + type: number; + } + | { + /** maximum number of messages allowed to accumulate before client is disconnected (default 4096) */ + key: 'node.feed.output.max-send-queue'; + type: number; + } + | { + /** duration for ping 
interval (default 5s) */ + key: 'node.feed.output.ping'; + type: string; + } + | { + /** port to bind the relay feed output to (default "9642") */ + key: 'node.feed.output.port'; + type: string; + } + | { + /** queue size for HTTP to WS upgrade (default 100) */ + key: 'node.feed.output.queue'; + type: number; + } + | { + /** duration to wait before timing out reading data (i.e. pings) from clients (default 1s) */ + key: 'node.feed.output.read-timeout'; + type: string; + } + | { + /** require clients to use compression */ + key: 'node.feed.output.require-compression'; + type: boolean; + } + | { + /** don't connect if client version not present */ + key: 'node.feed.output.require-version'; + type: boolean; + } + | { + /** sign broadcast messages */ + key: 'node.feed.output.signed'; + type: boolean; + } + | { + /** number of threads to reserve for HTTP to WS upgrade (default 100) */ + key: 'node.feed.output.workers'; + type: number; + } + | { + /** duration to wait before timing out writing data to clients (default 2s) */ + key: 'node.feed.output.write-timeout'; + type: string; + } + | { + /** the maximum time to wait between inbox checks (if not enough new blocks are found) (default 1m0s) */ + key: 'node.inbox-reader.check-delay'; + type: string; + } + | { + /** the default number of blocks to read at once (will vary based on traffic by default) (default 100) */ + key: 'node.inbox-reader.default-blocks-to-read'; + type: number; + } + | { + /** number of latest blocks to ignore to reduce reorgs */ + key: 'node.inbox-reader.delay-blocks'; + type: number; + } + | { + /** erase future transactions in addition to overwriting existing ones on reorg */ + key: 'node.inbox-reader.hard-reorg'; + type: boolean; + } + | { + /** if adjust-blocks-to-read is enabled, the maximum number of blocks to read at once (default 2000) */ + key: 'node.inbox-reader.max-blocks-to-read'; + type: number; + } + | { + /** the minimum number of blocks to read at once (when caught up lowers load on L1) (default 1) */ + key: 'node.inbox-reader.min-blocks-to-read'; + type: number; + } + | { + /** mode to only read latest or safe or finalized L1 blocks. Enabling safe or finalized disables feed input and output. Defaults to latest. Takes string input, valid strings- latest, safe, finalized (default "latest") */ + key: 'node.inbox-reader.read-mode'; + type: string; + } + | { + /** if adjust-blocks-to-read is enabled, the target number of messages to read at once (default 500) */ + key: 'node.inbox-reader.target-messages-read'; + type: number; + } + | { + /** should node always try grabing lock in background */ + key: 'node.maintenance.lock.background-lock'; + type: boolean; + } + | { + /** if false, always treat this as locked and don't write the lock to redis (default true) */ + key: 'node.maintenance.lock.enable'; + type: boolean; + } + | { + /** key for lock */ + key: 'node.maintenance.lock.key'; + type: string; + } + | { + /** how long lock is held (default 1m0s) */ + key: 'node.maintenance.lock.lockout-duration'; + type: string; + } + | { + /** this node's id prefix when acquiring the lock (optional) */ + key: 'node.maintenance.lock.my-id'; + type: string; + } + | { + /** how long between consecutive calls to redis (default 10s) */ + key: 'node.maintenance.lock.refresh-duration'; + type: string; + } + | { + /** UTC 24-hour time of day to run maintenance (currently only db compaction) at (e.g. 
15:00) */ + key: 'node.maintenance.time-of-day'; + type: string; + } + | { + /** enable message pruning (default true) */ + key: 'node.message-pruner.enable'; + type: boolean; + } + | { + /** min number of batches not pruned (default 2) */ + key: 'node.message-pruner.min-batches-left'; + type: number; + } + | { + /** interval for running message pruner (default 1m0s) */ + key: 'node.message-pruner.prune-interval'; + type: string; + } + | { + /** Dangerous! only meant to be used by system tests */ + key: 'node.parent-chain-reader.dangerous.wait-for-tx-approval-safe-poll'; + type: string; + } + | { + /** enable reader connection (default true) */ + key: 'node.parent-chain-reader.enable'; + type: boolean; + } + | { + /** warns if the latest l1 block is at least this old (default 5m0s) */ + key: 'node.parent-chain-reader.old-header-timeout'; + type: string; + } + | { + /** interval when polling endpoint (default 15s) */ + key: 'node.parent-chain-reader.poll-interval'; + type: string; + } + | { + /** do not attempt to subscribe to header events */ + key: 'node.parent-chain-reader.poll-only'; + type: boolean; + } + | { + /** interval for subscribe error (default 5m0s) */ + key: 'node.parent-chain-reader.subscribe-err-interval'; + type: string; + } + | { + /** timeout when waiting for a transaction (default 5m0s) */ + key: 'node.parent-chain-reader.tx-timeout'; + type: string; + } + | { + /** use l1 data about finalized/safe blocks (default true) */ + key: 'node.parent-chain-reader.use-finality-data'; + type: boolean; + } + | { + /** if non-empty, launch an HTTP service binding to this address that returns status code 200 when chosen and 503 otherwise */ + key: 'node.seq-coordinator.chosen-healthcheck-addr'; + type: string; + } + | { + /** enable sequence coordinator */ + key: 'node.seq-coordinator.enable'; + type: boolean; + } + | { + /** the maximum amount of time to spend waiting for another sequencer to accept the lockout when handing it off on shutdown or db compaction (default 30s) */ + key: 'node.seq-coordinator.handoff-timeout'; + type: string; + } + | { + /** (default 1m0s) */ + key: 'node.seq-coordinator.lockout-duration'; + type: string; + } + | { + /** (default 30s) */ + key: 'node.seq-coordinator.lockout-spare'; + type: string; + } + | { + /** will only be marked as wanting the lockout if not too far behind (default 2000) */ + key: 'node.seq-coordinator.msg-per-poll'; + type: number; + } + | { + /** url for this sequencer if it is the chosen (default "") */ + key: 'node.seq-coordinator.my-url'; + type: string; + } + | { + /** the Redis URL to coordinate via */ + key: 'node.seq-coordinator.redis-url'; + type: string; + } + | { + /** the number of times to retry releasing the wants lockout and chosen one status on shutdown (default 4) */ + key: 'node.seq-coordinator.release-retries'; + type: number; + } + | { + /** (default 50ms) */ + key: 'node.seq-coordinator.retry-interval'; + type: string; + } + | { + /** if non-zero will add delay after transferring control (default 5s) */ + key: 'node.seq-coordinator.safe-shutdown-delay'; + type: string; + } + | { + /** (default 24h0m0s) */ + key: 'node.seq-coordinator.seq-num-duration'; + type: string; + } + | { + /** accept verified message from sequencer (default true) */ + key: 'node.seq-coordinator.signer.ecdsa.accept-sequencer'; + type: boolean; + } + | { + /** a list of allowed addresses */ + key: 'node.seq-coordinator.signer.ecdsa.allowed-addresses'; + type: string[]; + } + | { + /** accept empty as valid signature (default true) */ + 
key: 'node.seq-coordinator.signer.ecdsa.dangerous.accept-missing'; + type: boolean; + } + | { + /** if to fall back to symmetric hmac */ + key: 'node.seq-coordinator.signer.symmetric-fallback'; + type: boolean; + } + | { + /** if to sign with symmetric hmac */ + key: 'node.seq-coordinator.signer.symmetric-sign'; + type: boolean; + } + | { + /** disable message signature verification */ + key: 'node.seq-coordinator.signer.symmetric.dangerous.disable-signature-verification'; + type: boolean; + } + | { + /** a fallback key used for message verification */ + key: 'node.seq-coordinator.signer.symmetric.fallback-verification-key'; + type: string; + } + | { + /** a 32-byte (64-character) hex string used to sign messages, or a path to a file containing it */ + key: 'node.seq-coordinator.signer.symmetric.signing-key'; + type: string; + } + | { + /** (default 250ms) */ + key: 'node.seq-coordinator.update-interval'; + type: string; + } + | { + /** enable sequencer */ + key: 'node.sequencer'; + type: boolean; + } + | { + /** confirmation blocks (default 12) */ + key: 'node.staker.confirmation-blocks'; + type: number; + } + | { + /** validator smart contract wallet public address */ + key: 'node.staker.contract-wallet-address'; + type: string; + } + | { + /** DANGEROUS! make assertions even when the wasm module root is wrong */ + key: 'node.staker.dangerous.ignore-rollup-wasm-module-root'; + type: boolean; + } + | { + /** DANGEROUS! allows running an L1 validator without a block validator */ + key: 'node.staker.dangerous.without-block-validator'; + type: boolean; + } + | { + /** if true, don't put transactions in the mempool that spend a total greater than the batch poster's balance (default true) */ + key: 'node.staker.data-poster.allocate-mempool-balance'; + type: boolean; + } + | { + /** comma-separated list of durations since first posting a blob transaction to attempt a replace-by-fee (default "5m,10m,30m,1h,4h,8h,16h,22h") */ + key: 'node.staker.data-poster.blob-tx-replacement-times'; + type: string; + } + | { + /** clear database storage */ + key: 'node.staker.data-poster.dangerous.clear-dbstorage'; + type: boolean; + } + | { + /** unit to measure the time elapsed since creation of transaction used for maximum fee cap calculation (default 10m0s) */ + key: 'node.staker.data-poster.elapsed-time-base'; + type: string; + } + | { + /** weight given to the units of time elapsed used for maximum fee cap calculation (default 10) */ + key: 'node.staker.data-poster.elapsed-time-importance'; + type: number; + } + | { + /** external signer address */ + key: 'node.staker.data-poster.external-signer.address'; + type: string; + } + | { + /** rpc client cert */ + key: 'node.staker.data-poster.external-signer.client-cert'; + type: string; + } + | { + /** rpc client private key */ + key: 'node.staker.data-poster.external-signer.client-private-key'; + type: string; + } + | { + /** external signer method (default "eth_signTransaction") */ + key: 'node.staker.data-poster.external-signer.method'; + type: string; + } + | { + /** external signer root CA */ + key: 'node.staker.data-poster.external-signer.root-ca'; + type: string; + } + | { + /** external signer url */ + key: 'node.staker.data-poster.external-signer.url'; + type: string; + } + | { + /** encodes items in a legacy way (as it was before dropping generics) */ + key: 'node.staker.data-poster.legacy-storage-encoding'; + type: boolean; + } + | { + /** the maximum tip cap to post EIP-4844 blob carrying transactions at (default 1) */ + key: 
'node.staker.data-poster.max-blob-tx-tip-cap-gwei'; + type: number; + } + | { + /** the maximum multiple of the current price to bid for a transaction's fees (may be exceeded due to min rbf increase, 0 = unlimited) (default 100000) */ + key: 'node.staker.data-poster.max-fee-bid-multiple-bips'; + type: number; + } + | { + /** mathematical formula to calculate maximum fee cap gwei the result of which would be float64. This expression is expected to be evaluated please refer https://github.com/Knetic/govaluate/blob/master/MANUAL.md to find all available mathematical operators. Currently available variables to construct the formula are BacklogOfBatches, UrgencyGWei, ElapsedTime, ElapsedTimeBase, ElapsedTimeImportance, and TargetPriceGWei (default "((BacklogOfBatches * UrgencyGWei) ** 2) + ((ElapsedTime/ElapsedTimeBase) ** 2) * ElapsedTimeImportance + TargetPriceGWei") */ + key: 'node.staker.data-poster.max-fee-cap-formula'; + type: string; + } + | { + /** the maximum number of transactions to have queued in the mempool at once (0 = unlimited) (default 1) */ + key: 'node.staker.data-poster.max-mempool-transactions'; + type: number; + } + | { + /** the maximum number of weight (weight = min(1, tx.blobs)) to have queued in the mempool at once (0 = unlimited) (default 1) */ + key: 'node.staker.data-poster.max-mempool-weight'; + type: number; + } + | { + /** the maximum number of unconfirmed transactions to track at once (0 = unlimited) */ + key: 'node.staker.data-poster.max-queued-transactions'; + type: number; + } + | { + /** the maximum tip cap to post transactions at (default 5) */ + key: 'node.staker.data-poster.max-tip-cap-gwei'; + type: number; + } + | { + /** the minimum tip cap to post EIP-4844 blob carrying transactions at (default 1) */ + key: 'node.staker.data-poster.min-blob-tx-tip-cap-gwei'; + type: number; + } + | { + /** the minimum tip cap to post transactions at (default 0.05) */ + key: 'node.staker.data-poster.min-tip-cap-gwei'; + type: number; + } + | { + /** the maximum probable reorg depth, used to determine when a transaction will no longer likely need replaced-by-fee (default 1) */ + key: 'node.staker.data-poster.nonce-rbf-soft-confs'; + type: number; + } + | { + /** disable message signature verification */ + key: 'node.staker.data-poster.redis-signer.dangerous.disable-signature-verification'; + type: boolean; + } + | { + /** a fallback key used for message verification */ + key: 'node.staker.data-poster.redis-signer.fallback-verification-key'; + type: string; + } + | { + /** a 32-byte (64-character) hex string used to sign messages, or a path to a file containing it */ + key: 'node.staker.data-poster.redis-signer.signing-key'; + type: string; + } + | { + /** comma-separated list of durations since first posting to attempt a replace-by-fee (default "5m,10m,20m,30m,1h,2h,4h,6h,8h,12h,16h,18h,20h,22h") */ + key: 'node.staker.data-poster.replacement-times'; + type: string; + } + | { + /** the target price to use for maximum fee cap calculation (default 60) */ + key: 'node.staker.data-poster.target-price-gwei'; + type: number; + } + | { + /** the urgency to use for maximum fee cap calculation (default 2) */ + key: 'node.staker.data-poster.urgency-gwei'; + type: number; + } + | { + /** uses database storage when enabled (default true) */ + key: 'node.staker.data-poster.use-db-storage'; + type: boolean; + } + | { + /** uses noop storage, it doesn't store anything */ + key: 'node.staker.data-poster.use-noop-storage'; + type: boolean; + } + | { + /** only treat a transaction as 
confirmed after L1 finality has been achieved (recommended) (default true) */ + key: 'node.staker.data-poster.wait-for-l1-finality'; + type: boolean; + } + | { + /** disable validator challenge */ + key: 'node.staker.disable-challenge'; + type: boolean; + } + | { + /** enable validator (default true) */ + key: 'node.staker.enable'; + type: boolean; + } + | { + /** use this much more gas than estimation says is necessary to post transactions (default 50000) */ + key: 'node.staker.extra-gas'; + type: number; + } + | { + /** The gas refunder contract address (optional) */ + key: 'node.staker.gas-refunder-address'; + type: string; + } + | { + /** if configured with the makeNodes strategy, how often to create new assertions (bypassed in case of a dispute) (default 1h0m0s) */ + key: 'node.staker.make-assertion-interval'; + type: string; + } + | { + /** only create smart wallet contract and exit */ + key: 'node.staker.only-create-wallet-contract'; + type: boolean; + } + | { + /** account to use (default is first account in keystore) */ + key: 'node.staker.parent-chain-wallet.account'; + type: string; + } + | { + /** if true, creates new key then exits */ + key: 'node.staker.parent-chain-wallet.only-create-key'; + type: boolean; + } + | { + /** wallet passphrase (default "PASSWORD_NOT_SET") */ + key: 'node.staker.parent-chain-wallet.password'; + type: string; + } + | { + /** pathname for wallet (default "validator-wallet") */ + key: 'node.staker.parent-chain-wallet.pathname'; + type: string; + } + | { + /** private key for wallet */ + key: 'node.staker.parent-chain-wallet.private-key'; + type: string; + } + | { + /** high gas delay blocks */ + key: 'node.staker.posting-strategy.high-gas-delay-blocks'; + type: number; + } + | { + /** high gas threshold */ + key: 'node.staker.posting-strategy.high-gas-threshold'; + type: number; + } + | { + /** redis url for L1 validator */ + key: 'node.staker.redis-url'; + type: string; + } + | { + /** how often the L1 validator should check the status of the L1 rollup and maybe take action with its stake (default 1m0s) */ + key: 'node.staker.staker-interval'; + type: string; + } + | { + /** assume staked nodes are valid (default true) */ + key: 'node.staker.start-validation-from-staked'; + type: boolean; + } + | { + /** L1 validator strategy, either watchtower, defensive, stakeLatest, or makeNodes (default "Watchtower") */ + key: 'node.staker.strategy'; + type: string; + } + | { + /** use a smart contract wallet instead of an EOA address */ + key: 'node.staker.use-smart-contract-wallet'; + type: boolean; + } + | { + /** allowed lag between messages read and blocks built (default 20) */ + key: 'node.sync-monitor.block-build-lag'; + type: number; + } + | { + /** allowed lag between messages read from sequencer inbox and blocks built */ + key: 'node.sync-monitor.block-build-sequencer-inbox-lag'; + type: number; + } + | { + /** allowed lag between local and remote messages (default 15) */ + key: 'node.sync-monitor.coordinator-msg-lag'; + type: number; + } + | { + /** wait for block validator to complete before returning finalized block number */ + key: 'node.sync-monitor.finalized-block-wait-for-block-validator'; + type: boolean; + } + | { + /** wait for block validator to complete before returning safe block number */ + key: 'node.sync-monitor.safe-block-wait-for-block-validator'; + type: boolean; + } + | { + /** delay when polling calls to execute messages (default 100ms) */ + key: 'node.transaction-streamer.execute-message-loop-delay'; + type: string; + } + | { + 
/** maximum cache of pending broadcaster messages (default 50000) */ + key: 'node.transaction-streamer.max-broadcaster-queue-size'; + type: number; + } + | { + /** maximum number of messages to attempt to resequence on reorg (0 = never resequence, -1 = always resequence) (default 1024) */ + key: 'node.transaction-streamer.max-reorg-resequence-depth'; + type: number; + } + | { + /** P2P bootnodes */ + key: 'p2p.bootnodes'; + type: string[]; + } + | { + /** P2P bootnodes v5 */ + key: 'p2p.bootnodes-v5'; + type: string[]; + } + | { + /** P2P discovery v4 */ + key: 'p2p.discovery-v4'; + type: boolean; + } + | { + /** P2P discovery v5 */ + key: 'p2p.discovery-v5'; + type: boolean; + } + | { + /** P2P listen address */ + key: 'p2p.listen-addr'; + type: string; + } + | { + /** P2P max peers (default 50) */ + key: 'p2p.max-peers'; + type: number; + } + | { + /** P2P no dial (default true) */ + key: 'p2p.no-dial'; + type: boolean; + } + | { + /** P2P no discovery (default true) */ + key: 'p2p.no-discovery'; + type: boolean; + } + | { + /** Value to send with the HTTP Authorization: header for Beacon REST requests, must include both scheme and scheme parameters */ + key: 'parent-chain.blob-client.authorization'; + type: string; + } + | { + /** Beacon Chain RPC URL to use for fetching blobs (normally on port 3500) */ + key: 'parent-chain.blob-client.beacon-url'; + type: string; + } + | { + /** Full path of the directory to save fetched blobs */ + key: 'parent-chain.blob-client.blob-directory'; + type: string; + } + | { + /** Backup beacon Chain RPC URL to use for fetching blobs (normally on port 3500) when unable to fetch from primary */ + key: 'parent-chain.blob-client.secondary-beacon-url'; + type: string; + } + | { + /** limit size of arguments in log entries (default 2048) */ + key: 'parent-chain.connection.arg-log-limit'; + type: number; + } + | { + /** how long to wait for initial connection (default 1m0s) */ + key: 'parent-chain.connection.connection-wait'; + type: string; + } + | { + /** path to file with jwtsecret for validation - ignored if url is self or self-auth */ + key: 'parent-chain.connection.jwtsecret'; + type: string; + } + | { + /** number of retries in case of failure(0 mean one attempt) (default 2) */ + key: 'parent-chain.connection.retries'; + type: number; + } + | { + /** delay between retries */ + key: 'parent-chain.connection.retry-delay'; + type: string; + } + | { + /** Errors matching this regular expression are automatically retried */ + key: 'parent-chain.connection.retry-errors'; + type: string; + } + | { + /** per-response timeout (0-disabled) (default 1m0s) */ + key: 'parent-chain.connection.timeout'; + type: string; + } + | { + /** url of server, use self for loopback websocket, self-auth for loopback with authentication */ + key: 'parent-chain.connection.url'; + type: string; + } + | { + /** if set other than 0, will be used to validate database and L1 connection */ + key: 'parent-chain.id'; + type: number; + } + | { + /** account to use (default is first account in keystore) */ + key: 'parent-chain.wallet.account'; + type: string; + } + | { + /** if true, creates new key then exits */ + key: 'parent-chain.wallet.only-create-key'; + type: boolean; + } + | { + /** wallet passphrase (default "PASSWORD_NOT_SET") */ + key: 'parent-chain.wallet.password'; + type: string; + } + | { + /** pathname for wallet (default "wallet") */ + key: 'parent-chain.wallet.pathname'; + type: string; + } + | { + /** private key for wallet */ + key: 'parent-chain.wallet.private-key'; + 
type: string; + } + | { + /** directory of ancient where the chain freezer can be opened */ + key: 'persistent.ancient'; + type: string; + } + | { + /** directory to store chain state */ + key: 'persistent.chain'; + type: string; + } + | { + /** backing database implementation to use ('leveldb' or 'pebble') (default "leveldb") */ + key: 'persistent.db-engine'; + type: string; + } + | { + /** directory to store global config (default ".arbitrum") */ + key: 'persistent.global-config'; + type: string; + } + | { + /** number of file descriptor handles to use for the database (default 512) */ + key: 'persistent.handles'; + type: number; + } + | { + /** directory to store log file */ + key: 'persistent.log-dir'; + type: string; + } + | { + /** enable pprof */ + key: 'pprof'; + type: boolean; + } + | { + /** pprof server address (default "127.0.0.1") */ + key: 'pprof-cfg.addr'; + type: string; + } + | { + /** pprof server port (default 6071) */ + key: 'pprof-cfg.port'; + type: number; + } + | { + /** the maximum number of requests in a batch (0 means no limit) (default 1000) */ + key: 'rpc.batch-request-limit'; + type: number; + } + | { + /** the maximum response size for a JSON-RPC request measured in bytes (0 means no limit) (default 10000000) */ + key: 'rpc.max-batch-response-size'; + type: number; + } + | { + /** validate is an authenticated API (default true) */ + key: 'validation.api-auth'; + type: boolean; + } + | { + /** validate is a public API */ + key: 'validation.api-public'; + type: boolean; + } + | { + /** timeout before discarding execution run (default 15m0s) */ + key: 'validation.arbitrator.execution-run-timeout'; + type: string; + } + | { + /** how many machines to store in cache while working on a challenge (should be even) (default 4) */ + key: 'validation.arbitrator.execution.cached-challenge-machines'; + type: number; + } + | { + /** initial steps between machines (default 100000) */ + key: 'validation.arbitrator.execution.initial-steps'; + type: number; + } + | { + /** path to write machines to (default "./target/output") */ + key: 'validation.arbitrator.output-path'; + type: string; + } + | { + /** number of concurrent validation threads */ + key: 'validation.arbitrator.workers'; + type: number; + } + | { + /** use Cranelift instead of LLVM when validating blocks using the jit-accelerated block validator (default true) */ + key: 'validation.jit.cranelift'; + type: boolean; + } + | { + /** if memory used by a jit wasm exceeds this limit, a warning is logged (default 4294967296) */ + key: 'validation.jit.wasm-memory-usage-limit'; + type: number; + } + | { + /** number of concurrent validation threads */ + key: 'validation.jit.workers'; + type: number; + } + | { + /** use jit for validation (default true) */ + key: 'validation.use-jit'; + type: boolean; + } + | { + /** list of WASM module roots to check if the on-chain WASM module root belongs to on node startup */ + key: 'validation.wasm.allowed-wasm-module-roots'; + type: string[]; + } + | { + /** enable check for compatibility of on-chain WASM module root with node (default true) */ + key: 'validation.wasm.enable-wasmroots-check'; + type: boolean; + } + | { + /** path to machine folders, each containing wasm files (machine.wavm.br, replay.wasm) */ + key: 'validation.wasm.root-path'; + type: string; + } + | { + /** WS-RPC server listening interface */ + key: 'ws.addr'; + type: string; + } + | { + /** APIs offered over the WS-RPC interface (default [net,web3,eth,arb]) */ + key: 'ws.api'; + type: string[]; + } + | { + /** 
expose private api via websocket */ + key: 'ws.expose-all'; + type: boolean; + } + | { + /** Origins from which to accept websockets requests */ + key: 'ws.origins'; + type: string[]; + } + | { + /** WS-RPC server listening port (default 8548) */ + key: 'ws.port'; + type: number; + } + | { + /** WS path path prefix on which JSON-RPC is served. Use '/' to serve on all paths */ + key: 'ws.rpcprefix'; + type: string; + }; diff --git a/src/types/NodeConfig.ts b/src/types/NodeConfig.ts index ac7bcf06..b23c9198 100644 --- a/src/types/NodeConfig.ts +++ b/src/types/NodeConfig.ts @@ -19,10 +19,8 @@ export type NodeConfigChainInfoJson = [ }, ]; -export type NodeConfigDataAvailabilityRpcAggregatorBackendsJson = [ - { - url: string; - pubkey: string; - signermask: number; - }, -]; +export type NodeConfigDataAvailabilityRpcAggregatorBackendsJson = { + url: string; + pubkey: string; + signermask: number; +}[]; diff --git a/yarn.lock b/yarn.lock index f6ae0e4a..5a92aa62 100644 --- a/yarn.lock +++ b/yarn.lock @@ -760,6 +760,18 @@ resolved "https://registry.yarnpkg.com/@types/chai/-/chai-4.3.10.tgz#2ad2959d1767edee5b0e4efb1a0cd2b500747317" integrity sha512-of+ICnbqjmFCiixUnqRulbylyXQrPqIGf/B3Jax1wIF3DvSheysQxAWvqHhZiW3IQrycvokcLcFQlveGp+vyNg== +"@types/lodash.set@^4.3.9": + version "4.3.9" + resolved "https://registry.yarnpkg.com/@types/lodash.set/-/lodash.set-4.3.9.tgz#55d95bce407b42c6655f29b2d0811fd428e698f0" + integrity sha512-KOxyNkZpbaggVmqbpr82N2tDVTx05/3/j0f50Es1prxrWB0XYf9p3QNxqcbWb7P1Q9wlvsUSlCFnwlPCIJ46PQ== + dependencies: + "@types/lodash" "*" + +"@types/lodash@*": + version "4.17.0" + resolved "https://registry.yarnpkg.com/@types/lodash/-/lodash-4.17.0.tgz#d774355e41f372d5350a4d0714abb48194a489c3" + integrity sha512-t7dhREVv6dbNj0q17X12j7yDG4bD/DHYX7o5/DbDxobP0HnGPgpRz2Ej77aL7TZT3DSw13fqUTj8J4mMnqa7WA== + "@types/node@*", "@types/node@^20.9.0": version "20.9.0" resolved "https://registry.yarnpkg.com/@types/node/-/node-20.9.0.tgz#bfcdc230583aeb891cf51e73cfdaacdd8deae298" @@ -2284,6 +2296,11 @@ locate-path@^7.1.0: dependencies: p-locate "^6.0.0" +lodash.set@^4.3.2: + version "4.3.2" + resolved "https://registry.yarnpkg.com/lodash.set/-/lodash.set-4.3.2.tgz#d8757b1da807dde24816b0d6a84bea1a76230b23" + integrity sha512-4hNPN5jlm/N/HLMCO43v8BXKq9Z7QdAGc/VGrRD61w8gN9g/6jF9A4L1pbUgBLCffi0w9VsXfTOij5x8iTyFvg== + log-symbols@^5.1.0: version "5.1.0" resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-5.1.0.tgz#a20e3b9a5f53fac6aeb8e2bb22c07cf2c8f16d93"