diff --git a/.github/workflows/ci-rocksdb-build.yml b/.github/workflows/ci-rocksdb-build.yml new file mode 100644 index 0000000000..fea7f0c5e5 --- /dev/null +++ b/.github/workflows/ci-rocksdb-build.yml @@ -0,0 +1,47 @@ +name: Continuous Integration (Rocksdb Build) + +env: + ROCKSDB_VERSION: v8.1.1 + +on: + workflow_call: +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: checkout repo from current commit + uses: actions/checkout@v3 + - name: Set up Go + uses: actions/setup-go@v3 + with: + go-version: "1.20" + check-latest: true + cache: true + - name: build rocksdb dependency + run: bash ${GITHUB_WORKSPACE}/.github/scripts/install-rocksdb.sh + - name: build application + run: make build COSMOS_BUILD_OPTIONS=rocksdb + test: + runs-on: ubuntu-latest + steps: + - name: install RocksDB dependencies + run: sudo apt-get update + && sudo apt-get install -y git make gcc libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev + - name: install RocksDB as shared library + run: git clone https://github.com/facebook/rocksdb.git + && cd rocksdb + && git checkout $ROCKSDB_VERSION + && sudo make -j$(nproc) install-shared + && sudo ldconfig + - name: checkout repo from current commit + uses: actions/checkout@v3 + with: + submodules: true + - name: Set up Go + uses: actions/setup-go@v3 + with: + go-version: "1.20" + check-latest: true + cache: true + - name: run unit tests + run: make test-rocksdb diff --git a/Dockerfile-rocksdb b/Dockerfile-rocksdb new file mode 100644 index 0000000000..200fd6e5f7 --- /dev/null +++ b/Dockerfile-rocksdb @@ -0,0 +1,47 @@ +FROM golang:1.20-bullseye AS kava-builder + +# Set up dependencies +RUN apt-get update \ + && apt-get install -y git make gcc libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev \ + && rm -rf /var/lib/apt/lists/* + +# Set working directory for the build +WORKDIR /root +# default home directory is /root + +# install rocksdb +ARG rocksdb_version=v8.1.1 +ENV ROCKSDB_VERSION=$rocksdb_version + +RUN git clone https://github.com/facebook/rocksdb.git \ + && cd rocksdb \ + && git checkout $ROCKSDB_VERSION \ + && make -j$(nproc) install-shared \ + && ldconfig + +# Add source files for kava +COPY . kava + +ARG kava_database_backend=rocksdb +ENV KAVA_DATABASE_BACKEND=$kava_database_backend + +# Mount go build and mod caches as container caches, persisted between builder invocations +RUN --mount=type=cache,target=/root/.cache/go-build \ + --mount=type=cache,target=/go/pkg/mod \ + cd kava && make install COSMOS_BUILD_OPTIONS=$KAVA_DATABASE_BACKEND + + +FROM ubuntu:22.04 + +RUN apt-get update \ + && apt-get install -y libsnappy1v5 libgflags2.2 zlib1g libbz2-1.0 curl jq \ + && rm -rf /var/lib/apt/lists/* + +# copy rocksdb shared objects +COPY --from=kava-builder /usr/local/lib/ /usr/local/lib/ +RUN ldconfig + +# copy kava binary +COPY --from=kava-builder /go/bin/kava /bin/kava + +CMD ["kava"] diff --git a/Makefile b/Makefile index 18f0c22c67..afe0294243 100644 --- a/Makefile +++ b/Makefile @@ -278,6 +278,9 @@ test-basic: test test: @go test $$(go list ./... | grep -v 'contrib') +test-rocksdb: + @go test -tags=rocksdb ./cmd/kava/opendb + # Run cli integration tests # `-p 4` to use 4 cores, `-tags cli_test` to tell go not to ignore the cli package # These tests use the `kvd` or `kvcli` binaries in the build dir, or in `$BUILDDIR` if that env var is set. 
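The `test-rocksdb` target above passes `-tags=rocksdb`, which is what pulls the tag-gated `cmd/kava/opendb` files introduced below into the build; a plain `make build` never links against RocksDB. A minimal sketch of the paired build-constraint pattern the package relies on (hypothetical `example` package, two separate files shown as two snippets):

```go
// example_default.go: compiled when the rocksdb tag is absent
//go:build !rocksdb
// +build !rocksdb

package example

// Backend reports which database backend this binary was built for.
func Backend() string { return "goleveldb" }
```

```go
// example_rocksdb.go: compiled only with `go build -tags=rocksdb`
//go:build rocksdb
// +build rocksdb

package example

// Backend reports which database backend this binary was built for.
func Backend() string { return "rocksdb" }
```

Exactly one of the two files is compiled into any given binary, which is why the rocksdb-only tests need their own Makefile target.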
diff --git a/cmd/kava/cmd/root.go b/cmd/kava/cmd/root.go
index e195c8c491..338399d9ee 100644
--- a/cmd/kava/cmd/root.go
+++ b/cmd/kava/cmd/root.go
@@ -22,6 +22,7 @@ import (
 	"github.com/kava-labs/kava/app"
 	"github.com/kava-labs/kava/app/params"
 	kavaclient "github.com/kava-labs/kava/client"
+	"github.com/kava-labs/kava/cmd/kava/opendb"
 	"github.com/kava-labs/kava/migrate"
 )
 
@@ -100,8 +101,18 @@ func addSubCmds(rootCmd *cobra.Command, encodingConfig params.EncodingConfig, de
 		encodingConfig: encodingConfig,
 	}
 
+	opts := ethermintserver.StartOptions{
+		AppCreator:      ac.newApp,
+		DefaultNodeHome: app.DefaultNodeHome,
+		DBOpener:        opendb.OpenDB,
+	}
 	// ethermintserver adds additional flags to start the JSON-RPC server for evm support
-	ethermintserver.AddCommands(rootCmd, defaultNodeHome, ac.newApp, ac.appExport, ac.addStartCmdFlags)
+	ethermintserver.AddCommands(
+		rootCmd,
+		opts,
+		ac.appExport,
+		ac.addStartCmdFlags,
+	)
 
 	// add keybase, auxiliary RPC, query, and tx child commands
 	rootCmd.AddCommand(
diff --git a/cmd/kava/opendb/metrics.go b/cmd/kava/opendb/metrics.go
new file mode 100644
index 0000000000..a326fceb3c
--- /dev/null
+++ b/cmd/kava/opendb/metrics.go
@@ -0,0 +1,166 @@
+//go:build rocksdb
+// +build rocksdb
+
+package opendb
+
+import (
+	"github.com/go-kit/kit/metrics"
+	"github.com/go-kit/kit/metrics/prometheus"
+	stdprometheus "github.com/prometheus/client_golang/prometheus"
+)
+
+// rocksdbMetrics is initialized in registerMetrics() when the rocksdb.enable-metrics flag is set to true
+var rocksdbMetrics *Metrics
+
+// Metrics contains all rocksdb metrics which are reported to prometheus
+type Metrics struct {
+	// Keys
+	NumberKeysWritten metrics.Gauge
+	NumberKeysRead    metrics.Gauge
+	NumberKeysUpdated metrics.Gauge
+	EstimateNumKeys   metrics.Gauge
+
+	// Files
+	NumberFileOpens  metrics.Gauge
+	NumberFileErrors metrics.Gauge
+
+	// Memory
+	BlockCacheUsage         metrics.Gauge
+	EstimateTableReadersMem metrics.Gauge
+	CurSizeAllMemTables     metrics.Gauge
+	BlockCachePinnedUsage   metrics.Gauge
+
+	// Cache
+	BlockCacheMiss        metrics.Gauge
+	BlockCacheHit         metrics.Gauge
+	BlockCacheAdd         metrics.Gauge
+	BlockCacheAddFailures metrics.Gauge
+}
+
+// registerMetrics registers metrics in prometheus and initializes the rocksdbMetrics variable
+func registerMetrics() {
+	if rocksdbMetrics != nil {
+		// metrics already registered
+		return
+	}
+
+	labels := make([]string, 0)
+	rocksdbMetrics = &Metrics{
+		// Keys
+		NumberKeysWritten: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Namespace: "rocksdb",
+			Subsystem: "key",
+			Name:      "number_keys_written",
+			Help:      "number of keys written to the database",
+		}, labels),
+		NumberKeysRead: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Namespace: "rocksdb",
+			Subsystem: "key",
+			Name:      "number_keys_read",
+			Help:      "number of keys read from the database",
+		}, labels),
+		NumberKeysUpdated: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Namespace: "rocksdb",
+			Subsystem: "key",
+			Name:      "number_keys_updated",
+			Help:      "number of keys updated in the database",
+		}, labels),
+		EstimateNumKeys: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Namespace: "rocksdb",
+			Subsystem: "key",
+			Name:      "estimate_num_keys",
+			Help:      "estimated number of total keys in the active and unflushed immutable memtables and storage",
+		}, labels),
+
+		// Files
+		NumberFileOpens: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Namespace: "rocksdb",
+			Subsystem: "file",
+			Name:      "number_file_opens",
+			Help:      "number of file opens",
+		}, labels),
+		NumberFileErrors: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Namespace: "rocksdb",
+			Subsystem: "file",
+			Name:      "number_file_errors",
+			Help:      "number of file errors",
+		}, labels),
+
+		// 
Memory + BlockCacheUsage: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: "rocksdb", + Subsystem: "memory", + Name: "block_cache_usage", + Help: "memory size for the entries residing in block cache", + }, labels), + EstimateTableReadersMem: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: "rocksdb", + Subsystem: "memory", + Name: "estimate_table_readers_mem", + Help: "estimated memory used for reading SST tables, excluding memory used in block cache (e.g., filter and index blocks)", + }, labels), + CurSizeAllMemTables: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: "rocksdb", + Subsystem: "memory", + Name: "cur_size_all_mem_tables", + Help: "approximate size of active and unflushed immutable memtables (bytes)", + }, labels), + BlockCachePinnedUsage: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: "rocksdb", + Subsystem: "memory", + Name: "block_cache_pinned_usage", + Help: "returns the memory size for the entries being pinned", + }, labels), + + // Cache + BlockCacheMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: "rocksdb", + Subsystem: "cache", + Name: "block_cache_miss", + Help: "block_cache_miss == block_cache_index_miss + block_cache_filter_miss + block_cache_data_miss", + }, labels), + BlockCacheHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: "rocksdb", + Subsystem: "cache", + Name: "block_cache_hit", + Help: "block_cache_hit == block_cache_index_hit + block_cache_filter_hit + block_cache_data_hit", + }, labels), + BlockCacheAdd: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: "rocksdb", + Subsystem: "cache", + Name: "block_cache_add", + Help: "number of blocks added to block cache", + }, labels), + BlockCacheAddFailures: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: "rocksdb", + Subsystem: "cache", + Name: "block_cache_add_failures", + Help: "number of failures when adding blocks to block cache", + }, labels), + } +} + +// report reports metrics to prometheus based on rocksdb props and stats +func (m *Metrics) report(props *properties, stats *stats) { + // Keys + m.NumberKeysWritten.Set(float64(stats.NumberKeysWritten)) + m.NumberKeysRead.Set(float64(stats.NumberKeysRead)) + m.NumberKeysUpdated.Set(float64(stats.NumberKeysUpdated)) + m.EstimateNumKeys.Set(float64(props.EstimateNumKeys)) + + // Files + m.NumberFileOpens.Set(float64(stats.NumberFileOpens)) + m.NumberFileErrors.Set(float64(stats.NumberFileErrors)) + + // Memory + m.BlockCacheUsage.Set(float64(props.BlockCacheUsage)) + m.EstimateTableReadersMem.Set(float64(props.EstimateTableReadersMem)) + m.CurSizeAllMemTables.Set(float64(props.CurSizeAllMemTables)) + m.BlockCachePinnedUsage.Set(float64(props.BlockCachePinnedUsage)) + + // Cache + m.BlockCacheMiss.Set(float64(stats.BlockCacheMiss)) + m.BlockCacheHit.Set(float64(stats.BlockCacheHit)) + m.BlockCacheAdd.Set(float64(stats.BlockCacheAdd)) + m.BlockCacheAddFailures.Set(float64(stats.BlockCacheAddFailures)) +} diff --git a/cmd/kava/opendb/opendb.go b/cmd/kava/opendb/opendb.go new file mode 100644 index 0000000000..367dfd3f04 --- /dev/null +++ b/cmd/kava/opendb/opendb.go @@ -0,0 +1,18 @@ +//go:build !rocksdb +// +build !rocksdb + +package opendb + +import ( + "path/filepath" + + "github.com/cosmos/cosmos-sdk/server/types" + dbm "github.com/tendermint/tm-db" +) + +// OpenDB is a copy of default DBOpener function used by ethermint, see for details: +// https://github.com/evmos/ethermint/blob/07cf2bd2b1ce9bdb2e44ec42a39e7239292a14af/server/start.go#L647 
+func OpenDB(_ types.AppOptions, home string, backendType dbm.BackendType) (dbm.DB, error) { + dataDir := filepath.Join(home, "data") + return dbm.NewDB("application", backendType, dataDir) +} diff --git a/cmd/kava/opendb/opendb_rocksdb.go b/cmd/kava/opendb/opendb_rocksdb.go new file mode 100644 index 0000000000..35368c6284 --- /dev/null +++ b/cmd/kava/opendb/opendb_rocksdb.go @@ -0,0 +1,378 @@ +//go:build rocksdb +// +build rocksdb + +// Copyright 2023 Kava Labs, Inc. +// Copyright 2023 Cronos Labs, Inc. +// +// Derived from https://github.com/crypto-org-chain/cronos@496ce7e +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package opendb + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/cosmos/cosmos-sdk/server/types" + "github.com/linxGnu/grocksdb" + "github.com/spf13/cast" + dbm "github.com/tendermint/tm-db" +) + +var ErrUnexpectedConfiguration = errors.New("unexpected rocksdb configuration, rocksdb should have only one column family named default") + +const ( + // default tm-db block cache size for RocksDB + defaultBlockCacheSize = 1 << 30 + + defaultColumnFamilyName = "default" + + enableMetricsOptName = "rocksdb.enable-metrics" + reportMetricsIntervalSecsOptName = "rocksdb.report-metrics-interval-secs" + defaultReportMetricsIntervalSecs = 15 + + maxOpenFilesDBOptName = "rocksdb.max-open-files" + maxFileOpeningThreadsDBOptName = "rocksdb.max-file-opening-threads" + tableCacheNumshardbitsDBOptName = "rocksdb.table_cache_numshardbits" + allowMMAPWritesDBOptName = "rocksdb.allow_mmap_writes" + allowMMAPReadsDBOptName = "rocksdb.allow_mmap_reads" + useFsyncDBOptName = "rocksdb.use_fsync" + useAdaptiveMutexDBOptName = "rocksdb.use_adaptive_mutex" + bytesPerSyncDBOptName = "rocksdb.bytes_per_sync" + + writeBufferSizeCFOptName = "rocksdb.write-buffer-size" + numLevelsCFOptName = "rocksdb.num-levels" + maxWriteBufferNumberCFOptName = "rocksdb.max_write_buffer_number" + minWriteBufferNumberToMergeCFOptName = "rocksdb.min_write_buffer_number_to_merge" + maxBytesForLevelBaseCFOptName = "rocksdb.max_bytes_for_level_base" + maxBytesForLevelMultiplierCFOptName = "rocksdb.max_bytes_for_level_multiplier" + targetFileSizeBaseCFOptName = "rocksdb.target_file_size_base" + targetFileSizeMultiplierCFOptName = "rocksdb.target_file_size_multiplier" + level0FileNumCompactionTriggerCFOptName = "rocksdb.level0_file_num_compaction_trigger" + level0SlowdownWritesTriggerCFOptName = "rocksdb.level0_slowdown_writes_trigger" + + blockCacheSizeBBTOOptName = "rocksdb.block_cache_size" + bitsPerKeyBBTOOptName = "rocksdb.bits_per_key" + blockSizeBBTOOptName = "rocksdb.block_size" + cacheIndexAndFilterBlocksBBTOOptName = "rocksdb.cache_index_and_filter_blocks" + pinL0FilterAndIndexBlocksInCacheBBTOOptName = "rocksdb.pin_l0_filter_and_index_blocks_in_cache" + formatVersionBBTOOptName = "rocksdb.format_version" +) + +func OpenDB(appOpts types.AppOptions, home string, backendType dbm.BackendType) (dbm.DB, error) { + dataDir := filepath.Join(home, "data") + if 
backendType == dbm.RocksDBBackend {
+		return openRocksdb(filepath.Join(dataDir, "application.db"), appOpts)
+	}
+
+	return dbm.NewDB("application", backendType, dataDir)
+}
+
+// openRocksdb loads the existing options, overrides some of them from appOpts, and opens the database.
+// An option is overridden only if it is explicitly specified in appOpts.
+func openRocksdb(dir string, appOpts types.AppOptions) (dbm.DB, error) {
+	dbOpts, cfOpts, err := loadLatestOptions(dir)
+	if err != nil {
+		return nil, err
+	}
+	// customize rocksdb options
+	bbtoOpts := bbtoFromAppOpts(appOpts)
+	dbOpts.SetBlockBasedTableFactory(bbtoOpts)
+	cfOpts.SetBlockBasedTableFactory(bbtoOpts)
+	dbOpts = overrideDBOpts(dbOpts, appOpts)
+	cfOpts = overrideCFOpts(cfOpts, appOpts)
+
+	enableMetrics := cast.ToBool(appOpts.Get(enableMetricsOptName))
+	reportMetricsIntervalSecs := cast.ToInt64(appOpts.Get(reportMetricsIntervalSecsOptName))
+	if reportMetricsIntervalSecs == 0 {
+		reportMetricsIntervalSecs = defaultReportMetricsIntervalSecs
+	}
+
+	return newRocksDBWithOptions("application", dir, dbOpts, cfOpts, enableMetrics, reportMetricsIntervalSecs)
+}
+
+// loadLatestOptions loads and returns the database and column family options.
+// If the options file is not found, the database hasn't been created yet; in that case the default tm-db options are returned.
+// If the database exists, it should have exactly one column family, named default.
+func loadLatestOptions(dir string) (*grocksdb.Options, *grocksdb.Options, error) {
+	latestOpts, err := grocksdb.LoadLatestOptions(dir, grocksdb.NewDefaultEnv(), true, grocksdb.NewLRUCache(defaultBlockCacheSize))
+	if err != nil && strings.HasPrefix(err.Error(), "NotFound: ") {
+		return newDefaultOptions(), newDefaultOptions(), nil
+	}
+	if err != nil {
+		return nil, nil, err
+	}
+
+	cfNames := latestOpts.ColumnFamilyNames()
+	cfOpts := latestOpts.ColumnFamilyOpts()
+	// db should have only one column family named default
+	ok := len(cfNames) == 1 && cfNames[0] == defaultColumnFamilyName
+	if !ok {
+		return nil, nil, ErrUnexpectedConfiguration
+	}
+
+	// return db and cf opts
+	return latestOpts.Options(), &cfOpts[0], nil
+}
+
+// overrideDBOpts merges dbOpts and appOpts; appOpts takes precedence
+func overrideDBOpts(dbOpts *grocksdb.Options, appOpts types.AppOptions) *grocksdb.Options {
+	maxOpenFiles := appOpts.Get(maxOpenFilesDBOptName)
+	if maxOpenFiles != nil {
+		dbOpts.SetMaxOpenFiles(cast.ToInt(maxOpenFiles))
+	}
+
+	maxFileOpeningThreads := appOpts.Get(maxFileOpeningThreadsDBOptName)
+	if maxFileOpeningThreads != nil {
+		dbOpts.SetMaxFileOpeningThreads(cast.ToInt(maxFileOpeningThreads))
+	}
+
+	tableCacheNumshardbits := appOpts.Get(tableCacheNumshardbitsDBOptName)
+	if tableCacheNumshardbits != nil {
+		dbOpts.SetTableCacheNumshardbits(cast.ToInt(tableCacheNumshardbits))
+	}
+
+	allowMMAPWrites := appOpts.Get(allowMMAPWritesDBOptName)
+	if allowMMAPWrites != nil {
+		dbOpts.SetAllowMmapWrites(cast.ToBool(allowMMAPWrites))
+	}
+
+	allowMMAPReads := appOpts.Get(allowMMAPReadsDBOptName)
+	if allowMMAPReads != nil {
+		dbOpts.SetAllowMmapReads(cast.ToBool(allowMMAPReads))
+	}
+
+	useFsync := appOpts.Get(useFsyncDBOptName)
+	if useFsync != nil {
+		dbOpts.SetUseFsync(cast.ToBool(useFsync))
+	}
+
+	useAdaptiveMutex := appOpts.Get(useAdaptiveMutexDBOptName)
+	if useAdaptiveMutex != nil {
+		dbOpts.SetUseAdaptiveMutex(cast.ToBool(useAdaptiveMutex))
+	}
+
+	bytesPerSync := appOpts.Get(bytesPerSyncDBOptName)
+	if bytesPerSync != nil {
+		dbOpts.SetBytesPerSync(cast.ToUint64(bytesPerSync))
+	}
+
+	return dbOpts
+}
+
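Because each override helper checks `appOpts.Get` for nil before touching anything, options that aren't explicitly set in the app options keep whatever values were loaded from the database's existing OPTIONS file. A short sketch of that precedence from inside the package (the `stubAppOptions` type is hypothetical, mirroring the `mockAppOptions` helper in the tests further down; requires the rocksdb build tag):

```go
//go:build rocksdb
// +build rocksdb

package opendb

// stubAppOptions is a hypothetical stand-in for the cosmos-sdk server's
// AppOptions interface; it only needs Get(string) interface{}.
type stubAppOptions map[string]interface{}

func (s stubAppOptions) Get(key string) interface{} { return s[key] }

func exampleOverridePrecedence() (int, int) {
	dbOpts := newDefaultOptions() // e.g. max-open-files defaults to 4096
	dbOpts = overrideDBOpts(dbOpts, stubAppOptions{
		maxOpenFilesDBOptName: 999, // explicitly set, so it overrides the default
		// rocksdb.max-file-opening-threads is absent, so the loaded value is kept
	})

	// returns 999 and the untouched default for max-file-opening-threads
	return dbOpts.GetMaxOpenFiles(), dbOpts.GetMaxFileOpeningThreads()
}
```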
+// overrideCFOpts merges cfOpts and appOpts, appOpts takes precedence +func overrideCFOpts(cfOpts *grocksdb.Options, appOpts types.AppOptions) *grocksdb.Options { + writeBufferSize := appOpts.Get(writeBufferSizeCFOptName) + if writeBufferSize != nil { + cfOpts.SetWriteBufferSize(cast.ToUint64(writeBufferSize)) + } + + numLevels := appOpts.Get(numLevelsCFOptName) + if numLevels != nil { + cfOpts.SetNumLevels(cast.ToInt(numLevels)) + } + + maxWriteBufferNumber := appOpts.Get(maxWriteBufferNumberCFOptName) + if maxWriteBufferNumber != nil { + cfOpts.SetMaxWriteBufferNumber(cast.ToInt(maxWriteBufferNumber)) + } + + minWriteBufferNumberToMerge := appOpts.Get(minWriteBufferNumberToMergeCFOptName) + if minWriteBufferNumberToMerge != nil { + cfOpts.SetMinWriteBufferNumberToMerge(cast.ToInt(minWriteBufferNumberToMerge)) + } + + maxBytesForLevelBase := appOpts.Get(maxBytesForLevelBaseCFOptName) + if maxBytesForLevelBase != nil { + cfOpts.SetMaxBytesForLevelBase(cast.ToUint64(maxBytesForLevelBase)) + } + + maxBytesForLevelMultiplier := appOpts.Get(maxBytesForLevelMultiplierCFOptName) + if maxBytesForLevelMultiplier != nil { + cfOpts.SetMaxBytesForLevelMultiplier(cast.ToFloat64(maxBytesForLevelMultiplier)) + } + + targetFileSizeBase := appOpts.Get(targetFileSizeBaseCFOptName) + if targetFileSizeBase != nil { + cfOpts.SetTargetFileSizeBase(cast.ToUint64(targetFileSizeBase)) + } + + targetFileSizeMultiplier := appOpts.Get(targetFileSizeMultiplierCFOptName) + if targetFileSizeMultiplier != nil { + cfOpts.SetTargetFileSizeMultiplier(cast.ToInt(targetFileSizeMultiplier)) + } + + level0FileNumCompactionTrigger := appOpts.Get(level0FileNumCompactionTriggerCFOptName) + if level0FileNumCompactionTrigger != nil { + cfOpts.SetLevel0FileNumCompactionTrigger(cast.ToInt(level0FileNumCompactionTrigger)) + } + + level0SlowdownWritesTrigger := appOpts.Get(level0SlowdownWritesTriggerCFOptName) + if level0SlowdownWritesTrigger != nil { + cfOpts.SetLevel0SlowdownWritesTrigger(cast.ToInt(level0SlowdownWritesTrigger)) + } + + return cfOpts +} + +func bbtoFromAppOpts(appOpts types.AppOptions) *grocksdb.BlockBasedTableOptions { + bbto := defaultBBTO() + + blockCacheSize := appOpts.Get(blockCacheSizeBBTOOptName) + if blockCacheSize != nil { + cache := grocksdb.NewLRUCache(cast.ToUint64(blockCacheSize)) + bbto.SetBlockCache(cache) + } + + bitsPerKey := appOpts.Get(bitsPerKeyBBTOOptName) + if bitsPerKey != nil { + filter := grocksdb.NewBloomFilter(cast.ToFloat64(bitsPerKey)) + bbto.SetFilterPolicy(filter) + } + + blockSize := appOpts.Get(blockSizeBBTOOptName) + if blockSize != nil { + bbto.SetBlockSize(cast.ToInt(blockSize)) + } + + cacheIndexAndFilterBlocks := appOpts.Get(cacheIndexAndFilterBlocksBBTOOptName) + if cacheIndexAndFilterBlocks != nil { + bbto.SetCacheIndexAndFilterBlocks(cast.ToBool(cacheIndexAndFilterBlocks)) + } + + pinL0FilterAndIndexBlocksInCache := appOpts.Get(pinL0FilterAndIndexBlocksInCacheBBTOOptName) + if pinL0FilterAndIndexBlocksInCache != nil { + bbto.SetPinL0FilterAndIndexBlocksInCache(cast.ToBool(pinL0FilterAndIndexBlocksInCache)) + } + + formatVersion := appOpts.Get(formatVersionBBTOOptName) + if formatVersion != nil { + bbto.SetFormatVersion(cast.ToInt(formatVersion)) + } + + return bbto +} + +// newRocksDBWithOptions opens rocksdb with provided database and column family options +// newRocksDBWithOptions expects that db has only one column family named default +func newRocksDBWithOptions( + name string, + dir string, + dbOpts *grocksdb.Options, + cfOpts *grocksdb.Options, + enableMetrics bool, + 
reportMetricsIntervalSecs int64, +) (*dbm.RocksDB, error) { + dbPath := filepath.Join(dir, name+".db") + + // Ensure path exists + if err := os.MkdirAll(dbPath, 0755); err != nil { + return nil, fmt.Errorf("failed to create db path: %w", err) + } + + // EnableStatistics adds overhead so shouldn't be enabled in production + if enableMetrics { + dbOpts.EnableStatistics() + } + + db, _, err := grocksdb.OpenDbColumnFamilies(dbOpts, dbPath, []string{defaultColumnFamilyName}, []*grocksdb.Options{cfOpts}) + if err != nil { + return nil, err + } + + if enableMetrics { + registerMetrics() + go reportMetrics(db, time.Second*time.Duration(reportMetricsIntervalSecs)) + } + + ro := grocksdb.NewDefaultReadOptions() + wo := grocksdb.NewDefaultWriteOptions() + woSync := grocksdb.NewDefaultWriteOptions() + woSync.SetSync(true) + return dbm.NewRocksDBWithRawDB(db, ro, wo, woSync), nil +} + +// newDefaultOptions returns default tm-db options for RocksDB, see for details: +// https://github.com/Kava-Labs/tm-db/blob/94ff76d31724965f8883cddebabe91e0d01bc03f/rocksdb.go#L30 +func newDefaultOptions() *grocksdb.Options { + // default rocksdb option, good enough for most cases, including heavy workloads. + // 1GB table cache, 512MB write buffer(may use 50% more on heavy workloads). + // compression: snappy as default, need to -lsnappy to enable. + bbto := defaultBBTO() + + opts := grocksdb.NewDefaultOptions() + opts.SetBlockBasedTableFactory(bbto) + // SetMaxOpenFiles to 4096 seems to provide a reliable performance boost + opts.SetMaxOpenFiles(4096) + opts.SetCreateIfMissing(true) + opts.IncreaseParallelism(runtime.NumCPU()) + // 1.5GB maximum memory use for writebuffer. + opts.OptimizeLevelStyleCompaction(512 * 1024 * 1024) + + return opts +} + +// defaultBBTO returns default tm-db bbto options for RocksDB, see for details: +// https://github.com/Kava-Labs/tm-db/blob/94ff76d31724965f8883cddebabe91e0d01bc03f/rocksdb.go#L30 +func defaultBBTO() *grocksdb.BlockBasedTableOptions { + bbto := grocksdb.NewDefaultBlockBasedTableOptions() + bbto.SetBlockCache(grocksdb.NewLRUCache(defaultBlockCacheSize)) + bbto.SetFilterPolicy(grocksdb.NewBloomFilter(10)) + + return bbto +} + +// reportMetrics periodically requests stats from rocksdb and reports to prometheus +// NOTE: should be launched as a goroutine +func reportMetrics(db *grocksdb.DB, interval time.Duration) { + ticker := time.NewTicker(interval) + for { + select { + case <-ticker.C: + props, stats, err := getPropsAndStats(db) + if err != nil { + continue + } + + rocksdbMetrics.report(props, stats) + } + } +} + +// getPropsAndStats gets statistics from rocksdb +func getPropsAndStats(db *grocksdb.DB) (*properties, *stats, error) { + propsLoader := newPropsLoader(db) + props, err := propsLoader.load() + if err != nil { + return nil, nil, err + } + + statMap, err := parseSerializedStats(props.OptionsStatistics) + if err != nil { + return nil, nil, err + } + + statLoader := newStatLoader(statMap) + stats, err := statLoader.load() + if err != nil { + return nil, nil, err + } + + return props, stats, nil +} diff --git a/cmd/kava/opendb/opendb_rocksdb_test.go b/cmd/kava/opendb/opendb_rocksdb_test.go new file mode 100644 index 0000000000..e8d1bb2916 --- /dev/null +++ b/cmd/kava/opendb/opendb_rocksdb_test.go @@ -0,0 +1,357 @@ +//go:build rocksdb +// +build rocksdb + +package opendb + +import ( + "os" + "path/filepath" + "testing" + + "github.com/linxGnu/grocksdb" + "github.com/stretchr/testify/require" +) + +type mockAppOptions struct { + opts map[string]interface{} +} + +func 
newMockAppOptions(opts map[string]interface{}) *mockAppOptions { + return &mockAppOptions{ + opts: opts, + } +} + +func (m *mockAppOptions) Get(key string) interface{} { + return m.opts[key] +} + +func TestOpenRocksdb(t *testing.T) { + t.Run("db already exists", func(t *testing.T) { + defaultOpts := newDefaultOptions() + + for _, tc := range []struct { + desc string + mockAppOptions *mockAppOptions + maxOpenFiles int + maxFileOpeningThreads int + writeBufferSize uint64 + numLevels int + }{ + { + desc: "default options", + mockAppOptions: newMockAppOptions(map[string]interface{}{}), + maxOpenFiles: defaultOpts.GetMaxOpenFiles(), + maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(), + writeBufferSize: defaultOpts.GetWriteBufferSize(), + numLevels: defaultOpts.GetNumLevels(), + }, + { + desc: "change 2 options", + mockAppOptions: newMockAppOptions(map[string]interface{}{ + maxOpenFilesDBOptName: 999, + writeBufferSizeCFOptName: 999_999, + }), + maxOpenFiles: 999, + maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(), + writeBufferSize: 999_999, + numLevels: defaultOpts.GetNumLevels(), + }, + { + desc: "change 4 options", + mockAppOptions: newMockAppOptions(map[string]interface{}{ + maxOpenFilesDBOptName: 999, + maxFileOpeningThreadsDBOptName: 9, + writeBufferSizeCFOptName: 999_999, + numLevelsCFOptName: 9, + }), + maxOpenFiles: 999, + maxFileOpeningThreads: 9, + writeBufferSize: 999_999, + numLevels: 9, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + dir, err := os.MkdirTemp("", "rocksdb") + require.NoError(t, err) + defer func() { + err := os.RemoveAll(dir) + require.NoError(t, err) + }() + + db, err := openRocksdb(dir, tc.mockAppOptions) + require.NoError(t, err) + require.NoError(t, db.Close()) + + dbOpts, cfOpts, err := loadLatestOptions(filepath.Join(dir, "application.db")) + require.NoError(t, err) + require.Equal(t, tc.maxOpenFiles, dbOpts.GetMaxOpenFiles()) + require.Equal(t, tc.maxFileOpeningThreads, dbOpts.GetMaxFileOpeningThreads()) + require.Equal(t, tc.writeBufferSize, cfOpts.GetWriteBufferSize()) + require.Equal(t, tc.numLevels, cfOpts.GetNumLevels()) + }) + } + }) + + t.Run("db doesn't exist yet", func(t *testing.T) { + defaultOpts := newDefaultOptions() + + dir, err := os.MkdirTemp("", "rocksdb") + require.NoError(t, err) + defer func() { + err := os.RemoveAll(dir) + require.NoError(t, err) + }() + + mockAppOpts := newMockAppOptions(map[string]interface{}{}) + db, err := openRocksdb(dir, mockAppOpts) + require.NoError(t, err) + require.NoError(t, db.Close()) + + dbOpts, cfOpts, err := loadLatestOptions(filepath.Join(dir, "application.db")) + require.NoError(t, err) + require.Equal(t, defaultOpts.GetMaxOpenFiles(), dbOpts.GetMaxOpenFiles()) + require.Equal(t, defaultOpts.GetMaxFileOpeningThreads(), dbOpts.GetMaxFileOpeningThreads()) + require.Equal(t, defaultOpts.GetWriteBufferSize(), cfOpts.GetWriteBufferSize()) + require.Equal(t, defaultOpts.GetNumLevels(), cfOpts.GetNumLevels()) + }) +} + +func TestLoadLatestOptions(t *testing.T) { + t.Run("db already exists", func(t *testing.T) { + defaultOpts := newDefaultOptions() + + const testCasesNum = 3 + dbOptsList := make([]*grocksdb.Options, testCasesNum) + cfOptsList := make([]*grocksdb.Options, testCasesNum) + + dbOptsList[0] = newDefaultOptions() + cfOptsList[0] = newDefaultOptions() + + dbOptsList[1] = newDefaultOptions() + dbOptsList[1].SetMaxOpenFiles(999) + cfOptsList[1] = newDefaultOptions() + cfOptsList[1].SetWriteBufferSize(999_999) + + dbOptsList[2] = newDefaultOptions() + 
dbOptsList[2].SetMaxOpenFiles(999) + dbOptsList[2].SetMaxFileOpeningThreads(9) + cfOptsList[2] = newDefaultOptions() + cfOptsList[2].SetWriteBufferSize(999_999) + cfOptsList[2].SetNumLevels(9) + + for _, tc := range []struct { + desc string + dbOpts *grocksdb.Options + cfOpts *grocksdb.Options + maxOpenFiles int + maxFileOpeningThreads int + writeBufferSize uint64 + numLevels int + }{ + { + desc: "default options", + dbOpts: dbOptsList[0], + cfOpts: cfOptsList[0], + maxOpenFiles: defaultOpts.GetMaxOpenFiles(), + maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(), + writeBufferSize: defaultOpts.GetWriteBufferSize(), + numLevels: defaultOpts.GetNumLevels(), + }, + { + desc: "change 2 options", + dbOpts: dbOptsList[1], + cfOpts: cfOptsList[1], + maxOpenFiles: 999, + maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(), + writeBufferSize: 999_999, + numLevels: defaultOpts.GetNumLevels(), + }, + { + desc: "change 4 options", + dbOpts: dbOptsList[2], + cfOpts: cfOptsList[2], + maxOpenFiles: 999, + maxFileOpeningThreads: 9, + writeBufferSize: 999_999, + numLevels: 9, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + name := "application" + dir, err := os.MkdirTemp("", "rocksdb") + require.NoError(t, err) + defer func() { + err := os.RemoveAll(dir) + require.NoError(t, err) + }() + + db, err := newRocksDBWithOptions(name, dir, tc.dbOpts, tc.cfOpts, true, defaultReportMetricsIntervalSecs) + require.NoError(t, err) + require.NoError(t, db.Close()) + + dbOpts, cfOpts, err := loadLatestOptions(filepath.Join(dir, "application.db")) + require.NoError(t, err) + require.Equal(t, tc.maxOpenFiles, dbOpts.GetMaxOpenFiles()) + require.Equal(t, tc.maxFileOpeningThreads, dbOpts.GetMaxFileOpeningThreads()) + require.Equal(t, tc.writeBufferSize, cfOpts.GetWriteBufferSize()) + require.Equal(t, tc.numLevels, cfOpts.GetNumLevels()) + }) + } + }) + + t.Run("db doesn't exist yet", func(t *testing.T) { + defaultOpts := newDefaultOptions() + + dir, err := os.MkdirTemp("", "rocksdb") + require.NoError(t, err) + defer func() { + err := os.RemoveAll(dir) + require.NoError(t, err) + }() + + dbOpts, cfOpts, err := loadLatestOptions(filepath.Join(dir, "application.db")) + require.NoError(t, err) + require.Equal(t, defaultOpts.GetMaxOpenFiles(), dbOpts.GetMaxOpenFiles()) + require.Equal(t, defaultOpts.GetMaxFileOpeningThreads(), dbOpts.GetMaxFileOpeningThreads()) + require.Equal(t, defaultOpts.GetWriteBufferSize(), cfOpts.GetWriteBufferSize()) + require.Equal(t, defaultOpts.GetNumLevels(), cfOpts.GetNumLevels()) + }) +} + +func TestOverrideDBOpts(t *testing.T) { + defaultOpts := newDefaultOptions() + + for _, tc := range []struct { + desc string + mockAppOptions *mockAppOptions + maxOpenFiles int + maxFileOpeningThreads int + }{ + { + desc: "override nothing", + mockAppOptions: newMockAppOptions(map[string]interface{}{}), + maxOpenFiles: defaultOpts.GetMaxOpenFiles(), + maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(), + }, + { + desc: "override max-open-files", + mockAppOptions: newMockAppOptions(map[string]interface{}{ + maxOpenFilesDBOptName: 999, + }), + maxOpenFiles: 999, + maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(), + }, + { + desc: "override max-file-opening-threads", + mockAppOptions: newMockAppOptions(map[string]interface{}{ + maxFileOpeningThreadsDBOptName: 9, + }), + maxOpenFiles: defaultOpts.GetMaxOpenFiles(), + maxFileOpeningThreads: 9, + }, + { + desc: "override max-open-files and max-file-opening-threads", + mockAppOptions: 
newMockAppOptions(map[string]interface{}{ + maxOpenFilesDBOptName: 999, + maxFileOpeningThreadsDBOptName: 9, + }), + maxOpenFiles: 999, + maxFileOpeningThreads: 9, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + dbOpts := newDefaultOptions() + dbOpts = overrideDBOpts(dbOpts, tc.mockAppOptions) + + require.Equal(t, tc.maxOpenFiles, dbOpts.GetMaxOpenFiles()) + require.Equal(t, tc.maxFileOpeningThreads, dbOpts.GetMaxFileOpeningThreads()) + }) + } +} + +func TestOverrideCFOpts(t *testing.T) { + defaultOpts := newDefaultOptions() + + for _, tc := range []struct { + desc string + mockAppOptions *mockAppOptions + writeBufferSize uint64 + numLevels int + }{ + { + desc: "override nothing", + mockAppOptions: newMockAppOptions(map[string]interface{}{}), + writeBufferSize: defaultOpts.GetWriteBufferSize(), + numLevels: defaultOpts.GetNumLevels(), + }, + { + desc: "override write-buffer-size", + mockAppOptions: newMockAppOptions(map[string]interface{}{ + writeBufferSizeCFOptName: 999_999, + }), + writeBufferSize: 999_999, + numLevels: defaultOpts.GetNumLevels(), + }, + { + desc: "override num-levels", + mockAppOptions: newMockAppOptions(map[string]interface{}{ + numLevelsCFOptName: 9, + }), + writeBufferSize: defaultOpts.GetWriteBufferSize(), + numLevels: 9, + }, + { + desc: "override write-buffer-size and num-levels", + mockAppOptions: newMockAppOptions(map[string]interface{}{ + writeBufferSizeCFOptName: 999_999, + numLevelsCFOptName: 9, + }), + writeBufferSize: 999_999, + numLevels: 9, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + cfOpts := newDefaultOptions() + cfOpts = overrideCFOpts(cfOpts, tc.mockAppOptions) + + require.Equal(t, tc.writeBufferSize, cfOpts.GetWriteBufferSize()) + require.Equal(t, tc.numLevels, cfOpts.GetNumLevels()) + }) + } +} + +func TestNewRocksDBWithOptions(t *testing.T) { + defaultOpts := newDefaultOptions() + + name := "application" + dir, err := os.MkdirTemp("", "rocksdb") + require.NoError(t, err) + defer func() { + err := os.RemoveAll(dir) + require.NoError(t, err) + }() + + dbOpts := newDefaultOptions() + dbOpts.SetMaxOpenFiles(999) + cfOpts := newDefaultOptions() + cfOpts.SetWriteBufferSize(999_999) + + db, err := newRocksDBWithOptions(name, dir, dbOpts, cfOpts, true, defaultReportMetricsIntervalSecs) + require.NoError(t, err) + require.NoError(t, db.Close()) + + dbOpts, cfOpts, err = loadLatestOptions(filepath.Join(dir, "application.db")) + require.NoError(t, err) + require.Equal(t, 999, dbOpts.GetMaxOpenFiles()) + require.Equal(t, defaultOpts.GetMaxFileOpeningThreads(), dbOpts.GetMaxFileOpeningThreads()) + require.Equal(t, uint64(999_999), cfOpts.GetWriteBufferSize()) + require.Equal(t, defaultOpts.GetNumLevels(), dbOpts.GetNumLevels()) +} + +func TestNewDefaultOptions(t *testing.T) { + defaultOpts := newDefaultOptions() + + maxOpenFiles := defaultOpts.GetMaxOpenFiles() + require.Equal(t, 4096, maxOpenFiles) +} diff --git a/cmd/kava/opendb/props_loader.go b/cmd/kava/opendb/props_loader.go new file mode 100644 index 0000000000..16109b75f3 --- /dev/null +++ b/cmd/kava/opendb/props_loader.go @@ -0,0 +1,87 @@ +//go:build rocksdb +// +build rocksdb + +package opendb + +import ( + "fmt" + "strings" + + "errors" +) + +type propsGetter interface { + GetProperty(propName string) (value string) + GetIntProperty(propName string) (value uint64, success bool) +} + +type propsLoader struct { + db propsGetter + errorMsgs []string +} + +func newPropsLoader(db propsGetter) *propsLoader { + return &propsLoader{ + db: db, + errorMsgs: make([]string, 0), + } +} + +func (l 
*propsLoader) load() (*properties, error) { + props := &properties{ + BaseLevel: l.getIntProperty("rocksdb.base-level"), + BlockCacheCapacity: l.getIntProperty("rocksdb.block-cache-capacity"), + BlockCachePinnedUsage: l.getIntProperty("rocksdb.block-cache-pinned-usage"), + BlockCacheUsage: l.getIntProperty("rocksdb.block-cache-usage"), + CurSizeActiveMemTable: l.getIntProperty("rocksdb.cur-size-active-mem-table"), + CurSizeAllMemTables: l.getIntProperty("rocksdb.cur-size-all-mem-tables"), + EstimateLiveDataSize: l.getIntProperty("rocksdb.estimate-live-data-size"), + EstimateNumKeys: l.getIntProperty("rocksdb.estimate-num-keys"), + EstimateTableReadersMem: l.getIntProperty("rocksdb.estimate-table-readers-mem"), + LiveSSTFilesSize: l.getIntProperty("rocksdb.live-sst-files-size"), + SizeAllMemTables: l.getIntProperty("rocksdb.size-all-mem-tables"), + OptionsStatistics: l.getProperty("rocksdb.options-statistics"), + } + + if len(l.errorMsgs) != 0 { + errorMsg := strings.Join(l.errorMsgs, ";") + return nil, errors.New(errorMsg) + } + + return props, nil +} + +func (l *propsLoader) getProperty(propName string) string { + value := l.db.GetProperty(propName) + if value == "" { + l.errorMsgs = append(l.errorMsgs, fmt.Sprintf("property %v is empty", propName)) + return "" + } + + return value +} + +func (l *propsLoader) getIntProperty(propName string) uint64 { + value, ok := l.db.GetIntProperty(propName) + if !ok { + l.errorMsgs = append(l.errorMsgs, fmt.Sprintf("can't get %v int property", propName)) + return 0 + } + + return value +} + +type properties struct { + BaseLevel uint64 + BlockCacheCapacity uint64 + BlockCachePinnedUsage uint64 + BlockCacheUsage uint64 + CurSizeActiveMemTable uint64 + CurSizeAllMemTables uint64 + EstimateLiveDataSize uint64 + EstimateNumKeys uint64 + EstimateTableReadersMem uint64 + LiveSSTFilesSize uint64 + SizeAllMemTables uint64 + OptionsStatistics string +} diff --git a/cmd/kava/opendb/props_loader_test.go b/cmd/kava/opendb/props_loader_test.go new file mode 100644 index 0000000000..2643ab8bd5 --- /dev/null +++ b/cmd/kava/opendb/props_loader_test.go @@ -0,0 +1,112 @@ +//go:build rocksdb +// +build rocksdb + +package opendb + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +type mockPropsGetter struct { + props map[string]string + intProps map[string]uint64 +} + +func newMockPropsGetter( + props map[string]string, + intProps map[string]uint64, +) *mockPropsGetter { + return &mockPropsGetter{ + props: props, + intProps: intProps, + } +} + +func (m *mockPropsGetter) GetProperty(propName string) string { + return m.props[propName] +} + +func (m *mockPropsGetter) GetIntProperty(propName string) (uint64, bool) { + prop, ok := m.intProps[propName] + return prop, ok +} + +func TestPropsLoader(t *testing.T) { + defaultProps := map[string]string{ + "rocksdb.options-statistics": "1", + } + defaultIntProps := map[string]uint64{ + "rocksdb.base-level": 1, + "rocksdb.block-cache-capacity": 2, + "rocksdb.block-cache-pinned-usage": 3, + "rocksdb.block-cache-usage": 4, + "rocksdb.cur-size-active-mem-table": 5, + "rocksdb.cur-size-all-mem-tables": 6, + "rocksdb.estimate-live-data-size": 7, + "rocksdb.estimate-num-keys": 8, + "rocksdb.estimate-table-readers-mem": 9, + "rocksdb.live-sst-files-size": 10, + "rocksdb.size-all-mem-tables": 11, + } + missingProps := make(map[string]string) + missingIntProps := make(map[string]uint64) + defaultExpectedProps := properties{ + BaseLevel: 1, + BlockCacheCapacity: 2, + BlockCachePinnedUsage: 3, + BlockCacheUsage: 4, + 
CurSizeActiveMemTable:   5,
+		CurSizeAllMemTables:     6,
+		EstimateLiveDataSize:    7,
+		EstimateNumKeys:         8,
+		EstimateTableReadersMem: 9,
+		LiveSSTFilesSize:        10,
+		SizeAllMemTables:        11,
+		OptionsStatistics:       "1",
+	}
+
+	for _, tc := range []struct {
+		desc          string
+		props         map[string]string
+		intProps      map[string]uint64
+		expectedProps *properties
+		success       bool
+	}{
+		{
+			desc:          "success case",
+			props:         defaultProps,
+			intProps:      defaultIntProps,
+			expectedProps: &defaultExpectedProps,
+			success:       true,
+		},
+		{
+			desc:          "missing props",
+			props:         missingProps,
+			intProps:      defaultIntProps,
+			expectedProps: nil,
+			success:       false,
+		},
+		{
+			desc:          "missing integer props",
+			props:         defaultProps,
+			intProps:      missingIntProps,
+			expectedProps: nil,
+			success:       false,
+		},
+	} {
+		t.Run(tc.desc, func(t *testing.T) {
+			mockPropsGetter := newMockPropsGetter(tc.props, tc.intProps)
+
+			propsLoader := newPropsLoader(mockPropsGetter)
+			actualProps, err := propsLoader.load()
+			if tc.success {
+				require.NoError(t, err)
+			} else {
+				require.Error(t, err)
+			}
+			require.Equal(t, tc.expectedProps, actualProps)
+		})
+	}
+}
diff --git a/cmd/kava/opendb/stat_parser.go b/cmd/kava/opendb/stat_parser.go
new file mode 100644
index 0000000000..926d96c873
--- /dev/null
+++ b/cmd/kava/opendb/stat_parser.go
@@ -0,0 +1,111 @@
+//go:build rocksdb
+// +build rocksdb
+
+package opendb
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+// stat represents one line of rocksdb statistics data; a stat may have one or more properties
+// examples:
+// - rocksdb.block.cache.miss COUNT : 5
+// - rocksdb.compaction.times.micros P50 : 21112 P95 : 21112 P99 : 21112 P100 : 21112 COUNT : 1 SUM : 21112
+// `rocksdb.compaction.times.micros` is the name of the stat; P50, COUNT, SUM, etc. are its props
+type stat struct {
+	name  string
+	props map[string]string
+}
+
+// parseSerializedStats parses serializedStats into a map of stat objects
+// example of serializedStats:
+// rocksdb.block.cache.miss COUNT : 5
+// rocksdb.compaction.times.micros P50 : 21112 P95 : 21112 P99 : 21112 P100 : 21112 COUNT : 1 SUM : 21112
+func parseSerializedStats(serializedStats string) (map[string]*stat, error) {
+	stats := make(map[string]*stat, 0)
+
+	serializedStatList := strings.Split(serializedStats, "\n")
+	if len(serializedStatList) == 0 {
+		return nil, errors.New("serializedStats is empty")
+	}
+	serializedStatList = serializedStatList[:len(serializedStatList)-1]
+	// iterate over stats line by line
+	for _, serializedStat := range serializedStatList {
+		stat, err := parseSerializedStat(serializedStat)
+		if err != nil {
+			return nil, err
+		}
+
+		stats[stat.name] = stat
+	}
+
+	return stats, nil
+}
+
+// parseSerializedStat parses serializedStat into a stat object
+// example of serializedStat:
+// rocksdb.block.cache.miss COUNT : 5
+func parseSerializedStat(serializedStat string) (*stat, error) {
+	tokens := strings.Split(serializedStat, " ")
+	tokensNum := len(tokens)
+	if err := validateTokens(tokens); err != nil {
+		return nil, fmt.Errorf("tokens are invalid: %v", err)
+	}
+
+	props := make(map[string]string)
+	for idx := 1; idx < tokensNum; idx += 3 {
+		// should never happen, but double check to avoid an unexpected panic
+		if idx+2 >= tokensNum {
+			break
+		}
+
+		key := tokens[idx]
+		sep := tokens[idx+1]
+		value := tokens[idx+2]
+
+		if err := validateStatProperty(key, value, sep); err != nil {
+			return nil, fmt.Errorf("invalid stat property: %v", err)
+		}
+
+		props[key] = value
+	}
+
+	return &stat{
+		name:  tokens[0],
+		props: props,
+	}, nil
+}
+
+// validateTokens 
validates that tokens contains name + N triples (key, sep, value) +func validateTokens(tokens []string) error { + tokensNum := len(tokens) + if tokensNum < 4 { + return fmt.Errorf("invalid number of tokens: %v, tokens: %v", tokensNum, tokens) + } + if (tokensNum-1)%3 != 0 { + return fmt.Errorf("invalid number of tokens: %v, tokens: %v", tokensNum, tokens) + } + if tokens[0] == "" { + return fmt.Errorf("stat name shouldn't be empty") + } + + return nil +} + +// validateStatProperty validates that key and value are divided by separator and aren't empty +func validateStatProperty(key, value, sep string) error { + if key == "" { + return fmt.Errorf("key shouldn't be empty") + } + if sep != ":" { + return fmt.Errorf("separator should be :") + } + if value == "" { + return fmt.Errorf("value shouldn't be empty") + } + + return nil +} diff --git a/cmd/kava/opendb/stat_parser_test.go b/cmd/kava/opendb/stat_parser_test.go new file mode 100644 index 0000000000..dfeab9a4e8 --- /dev/null +++ b/cmd/kava/opendb/stat_parser_test.go @@ -0,0 +1,208 @@ +//go:build rocksdb +// +build rocksdb + +package opendb + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseSerializedStats(t *testing.T) { + defaultSerializedStats := `rocksdb.block.cache.miss COUNT : 1 +rocksdb.block.cache.hit COUNT : 2 +rocksdb.block.cache.add COUNT : 3 +rocksdb.block.cache.add.failures COUNT : 4 +rocksdb.compaction.times.micros P50 : 1 P95 : 2 P99 : 3 P100 : 4 COUNT : 5 SUM : 6 +rocksdb.compaction.times.cpu_micros P50 : 7 P95 : 8 P99 : 9 P100 : 10 COUNT : 11 SUM : 12 +` + defaultExpectedStatMap := map[string]*stat{ + "rocksdb.block.cache.miss": { + name: "rocksdb.block.cache.miss", + props: map[string]string{ + "COUNT": "1", + }, + }, + "rocksdb.block.cache.hit": { + name: "rocksdb.block.cache.hit", + props: map[string]string{ + "COUNT": "2", + }, + }, + "rocksdb.block.cache.add": { + name: "rocksdb.block.cache.add", + props: map[string]string{ + "COUNT": "3", + }, + }, + "rocksdb.block.cache.add.failures": { + name: "rocksdb.block.cache.add.failures", + props: map[string]string{ + "COUNT": "4", + }, + }, + "rocksdb.compaction.times.micros": { + name: "rocksdb.compaction.times.micros", + props: map[string]string{ + "P50": "1", + "P95": "2", + "P99": "3", + "P100": "4", + "COUNT": "5", + "SUM": "6", + }, + }, + "rocksdb.compaction.times.cpu_micros": { + name: "rocksdb.compaction.times.cpu_micros", + props: map[string]string{ + "P50": "7", + "P95": "8", + "P99": "9", + "P100": "10", + "COUNT": "11", + "SUM": "12", + }, + }, + } + + for _, tc := range []struct { + desc string + serializedStats string + expectedStatMap map[string]*stat + errMsg string + }{ + { + desc: "success case", + serializedStats: defaultSerializedStats, + expectedStatMap: defaultExpectedStatMap, + errMsg: "", + }, + { + desc: "missing value #1", + serializedStats: `rocksdb.block.cache.miss COUNT : +`, + expectedStatMap: nil, + errMsg: "invalid number of tokens", + }, + { + desc: "missing value #2", + serializedStats: `rocksdb.compaction.times.micros P50 : 1 P95 : +`, + expectedStatMap: nil, + errMsg: "invalid number of tokens", + }, + { + desc: "missing stat name", + serializedStats: ` COUNT : 1 +`, + expectedStatMap: nil, + errMsg: "stat name shouldn't be empty", + }, + { + desc: "empty stat", + serializedStats: ``, + expectedStatMap: make(map[string]*stat), + errMsg: "", + }, + } { + t.Run(tc.desc, func(t *testing.T) { + actualStatMap, err := parseSerializedStats(tc.serializedStats) + if tc.errMsg == "" { + require.NoError(t, err) + } 
else { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errMsg) + } + require.Equal(t, tc.expectedStatMap, actualStatMap) + }) + } +} + +func TestValidateTokens(t *testing.T) { + for _, tc := range []struct { + desc string + tokens []string + errMsg string + }{ + { + desc: "success case", + tokens: []string{"name", "key", ":", "value"}, + errMsg: "", + }, + { + desc: "missing value #1", + tokens: []string{"name", "key", ":"}, + errMsg: "invalid number of tokens", + }, + { + desc: "missing value #2", + tokens: []string{"name", "key", ":", "value", "key2", ":"}, + errMsg: "invalid number of tokens", + }, + { + desc: "empty stat name", + tokens: []string{"", "key", ":", "value"}, + errMsg: "stat name shouldn't be empty", + }, + } { + t.Run(tc.desc, func(t *testing.T) { + err := validateTokens(tc.tokens) + if tc.errMsg == "" { + require.NoError(t, err) + } else { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errMsg) + } + }) + } +} + +func TestValidateStatProperty(t *testing.T) { + for _, tc := range []struct { + desc string + key string + value string + sep string + errMsg string + }{ + { + desc: "success case", + key: "key", + value: "value", + sep: ":", + errMsg: "", + }, + { + desc: "missing key", + key: "", + value: "value", + sep: ":", + errMsg: "key shouldn't be empty", + }, + { + desc: "missing value", + key: "key", + value: "", + sep: ":", + errMsg: "value shouldn't be empty", + }, + { + desc: "invalid separator", + key: "key", + value: "value", + sep: "#", + errMsg: "separator should be :", + }, + } { + t.Run(tc.desc, func(t *testing.T) { + err := validateStatProperty(tc.key, tc.value, tc.sep) + if tc.errMsg == "" { + require.NoError(t, err) + } else { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errMsg) + } + }) + } +} diff --git a/cmd/kava/opendb/stats_loader.go b/cmd/kava/opendb/stats_loader.go new file mode 100644 index 0000000000..93e5684c50 --- /dev/null +++ b/cmd/kava/opendb/stats_loader.go @@ -0,0 +1,263 @@ +//go:build rocksdb +// +build rocksdb + +package opendb + +import ( + "fmt" + "strconv" +) + +const ( + sum = "SUM" + count = "COUNT" + p50 = "P50" + p95 = "P95" + p99 = "P99" + p100 = "P100" +) + +type statLoader struct { + // statMap contains map of stat objects returned by parseSerializedStats function + // example of stats: + // #1: rocksdb.block.cache.miss COUNT : 5 + // #2: rocksdb.compaction.times.micros P50 : 21112 P95 : 21112 P99 : 21112 P100 : 21112 COUNT : 1 SUM : 21112 + // #1 case will be cast into int64 + // #2 case will be cast into float64Histogram + statMap map[string]*stat + + // NOTE: some methods accumulate errors instead of returning them, these methods are private and not intended to use outside + errors []error +} + +func newStatLoader(statMap map[string]*stat) *statLoader { + return &statLoader{ + statMap: statMap, + errors: make([]error, 0), + } +} + +type stats struct { + NumberKeysWritten int64 + NumberKeysRead int64 + NumberKeysUpdated int64 + + // total block cache misses + // BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS + + // BLOCK_CACHE_FILTER_MISS + + // BLOCK_CACHE_DATA_MISS; + // BLOCK_CACHE_INDEX_MISS: # of times cache miss when accessing index block from block cache. + // BLOCK_CACHE_FILTER_MISS: # of times cache miss when accessing filter block from block cache. + // BLOCK_CACHE_DATA_MISS: # of times cache miss when accessing data block from block cache. 
+ BlockCacheMiss int64 + + // total block cache hit + // BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT + + // BLOCK_CACHE_FILTER_HIT + + // BLOCK_CACHE_DATA_HIT; + // BLOCK_CACHE_INDEX_HIT: # of times cache hit when accessing index block from block cache. + // BLOCK_CACHE_FILTER_HIT: # of times cache hit when accessing filter block from block cache. + // BLOCK_CACHE_DATA_HIT: # of times cache hit when accessing data block from block cache. + BlockCacheHit int64 + + // # of blocks added to block cache. + BlockCacheAdd int64 + // # of failures when adding blocks to block cache. + BlockCacheAddFailures int64 + + CompactReadBytes int64 // Bytes read during compaction + CompactWriteBytes int64 // Bytes written during compaction + + CompactionTimesMicros *float64Histogram + CompactionTimesCPUMicros *float64Histogram + NumFilesInSingleCompaction *float64Histogram + + // Read amplification statistics. + // Read amplification can be calculated using this formula + // (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES) + // + // REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled + // TODO(yevhenii): seems not working? + ReadAmpEstimateUsefulBytes int64 // Estimate of total bytes actually used. + ReadAmpTotalReadBytes int64 // Total size of loaded data blocks. + + NumberFileOpens int64 + NumberFileErrors int64 + + // # of times bloom filter has avoided file reads, i.e., negatives. + BloomFilterUseful int64 + // # of times bloom FullFilter has not avoided the reads. + BloomFilterFullPositive int64 + // # of times bloom FullFilter has not avoided the reads and data actually + // exist. + BloomFilterFullTruePositive int64 + + // # of memtable hits. + MemtableHit int64 + // # of memtable misses. + MemtableMiss int64 + + // # of Get() queries served by L0 + GetHitL0 int64 + // # of Get() queries served by L1 + GetHitL1 int64 + // # of Get() queries served by L2 and up + GetHitL2AndUp int64 + + // The number of uncompressed bytes issued by DB::Put(), DB::Delete(), + // DB::Merge(), and DB::Write(). + BytesWritten int64 + // The number of uncompressed bytes read from DB::Get(). It could be + // either from memtables, cache, or table files. + // For the number of logical bytes read from DB::MultiGet(), + // please use NUMBER_MULTIGET_BYTES_READ. + BytesRead int64 + + // Writer has to wait for compaction or flush to finish. 
+ StallMicros int64 + + // Last level and non-last level read statistics + LastLevelReadBytes int64 + LastLevelReadCount int64 + NonLastLevelReadBytes int64 + NonLastLevelReadCount int64 + + DBGetMicros *float64Histogram + DBWriteMicros *float64Histogram + + // Value size distribution in each operation + BytesPerRead *float64Histogram + BytesPerWrite *float64Histogram + BytesPerMultiget *float64Histogram + + // Time spent flushing memtable to disk + FlushMicros *float64Histogram +} + +type float64Histogram struct { + Sum float64 + Count float64 + P50 float64 + P95 float64 + P99 float64 + P100 float64 +} + +func (l *statLoader) error() error { + if len(l.errors) != 0 { + return fmt.Errorf("%v", l.errors) + } + + return nil +} + +func (l *statLoader) load() (*stats, error) { + stats := &stats{ + NumberKeysWritten: l.getInt64StatValue("rocksdb.number.keys.written", count), + NumberKeysRead: l.getInt64StatValue("rocksdb.number.keys.read", count), + NumberKeysUpdated: l.getInt64StatValue("rocksdb.number.keys.updated", count), + BlockCacheMiss: l.getInt64StatValue("rocksdb.block.cache.miss", count), + BlockCacheHit: l.getInt64StatValue("rocksdb.block.cache.hit", count), + BlockCacheAdd: l.getInt64StatValue("rocksdb.block.cache.add", count), + BlockCacheAddFailures: l.getInt64StatValue("rocksdb.block.cache.add.failures", count), + CompactReadBytes: l.getInt64StatValue("rocksdb.compact.read.bytes", count), + CompactWriteBytes: l.getInt64StatValue("rocksdb.compact.write.bytes", count), + CompactionTimesMicros: l.getFloat64HistogramStatValue("rocksdb.compaction.times.micros"), + CompactionTimesCPUMicros: l.getFloat64HistogramStatValue("rocksdb.compaction.times.cpu_micros"), + NumFilesInSingleCompaction: l.getFloat64HistogramStatValue("rocksdb.numfiles.in.singlecompaction"), + ReadAmpEstimateUsefulBytes: l.getInt64StatValue("rocksdb.read.amp.estimate.useful.bytes", count), + ReadAmpTotalReadBytes: l.getInt64StatValue("rocksdb.read.amp.total.read.bytes", count), + NumberFileOpens: l.getInt64StatValue("rocksdb.no.file.opens", count), + NumberFileErrors: l.getInt64StatValue("rocksdb.no.file.errors", count), + BloomFilterUseful: l.getInt64StatValue("rocksdb.bloom.filter.useful", count), + BloomFilterFullPositive: l.getInt64StatValue("rocksdb.bloom.filter.full.positive", count), + BloomFilterFullTruePositive: l.getInt64StatValue("rocksdb.bloom.filter.full.true.positive", count), + MemtableHit: l.getInt64StatValue("rocksdb.memtable.hit", count), + MemtableMiss: l.getInt64StatValue("rocksdb.memtable.miss", count), + GetHitL0: l.getInt64StatValue("rocksdb.l0.hit", count), + GetHitL1: l.getInt64StatValue("rocksdb.l1.hit", count), + GetHitL2AndUp: l.getInt64StatValue("rocksdb.l2andup.hit", count), + BytesWritten: l.getInt64StatValue("rocksdb.bytes.written", count), + BytesRead: l.getInt64StatValue("rocksdb.bytes.read", count), + StallMicros: l.getInt64StatValue("rocksdb.stall.micros", count), + LastLevelReadBytes: l.getInt64StatValue("rocksdb.last.level.read.bytes", count), + LastLevelReadCount: l.getInt64StatValue("rocksdb.last.level.read.count", count), + NonLastLevelReadBytes: l.getInt64StatValue("rocksdb.non.last.level.read.bytes", count), + NonLastLevelReadCount: l.getInt64StatValue("rocksdb.non.last.level.read.count", count), + DBGetMicros: l.getFloat64HistogramStatValue("rocksdb.db.get.micros"), + DBWriteMicros: l.getFloat64HistogramStatValue("rocksdb.db.write.micros"), + BytesPerRead: l.getFloat64HistogramStatValue("rocksdb.bytes.per.read"), + BytesPerWrite: 
l.getFloat64HistogramStatValue("rocksdb.bytes.per.write"), + BytesPerMultiget: l.getFloat64HistogramStatValue("rocksdb.bytes.per.multiget"), + FlushMicros: l.getFloat64HistogramStatValue("rocksdb.db.flush.micros"), + } + + err := l.error() + if err != nil { + return nil, err + } + + return stats, nil +} + +// getFloat64HistogramStatValue converts stat object into float64Histogram +func (l *statLoader) getFloat64HistogramStatValue(statName string) *float64Histogram { + return &float64Histogram{ + Sum: l.getFloat64StatValue(statName, sum), + Count: l.getFloat64StatValue(statName, count), + P50: l.getFloat64StatValue(statName, p50), + P95: l.getFloat64StatValue(statName, p95), + P99: l.getFloat64StatValue(statName, p99), + P100: l.getFloat64StatValue(statName, p100), + } +} + +// getInt64StatValue converts property of stat object into int64 +func (l *statLoader) getInt64StatValue(statName, propName string) int64 { + stringVal := l.getStatValue(statName, propName) + if stringVal == "" { + l.errors = append(l.errors, fmt.Errorf("can't get stat by name: %v", statName)) + return 0 + } + + intVal, err := strconv.ParseInt(stringVal, 10, 64) + if err != nil { + l.errors = append(l.errors, fmt.Errorf("can't parse int: %v", err)) + return 0 + } + + return intVal +} + +// getFloat64StatValue converts property of stat object into float64 +func (l *statLoader) getFloat64StatValue(statName, propName string) float64 { + stringVal := l.getStatValue(statName, propName) + if stringVal == "" { + l.errors = append(l.errors, fmt.Errorf("can't get stat by name: %v", statName)) + return 0 + } + + floatVal, err := strconv.ParseFloat(stringVal, 64) + if err != nil { + l.errors = append(l.errors, fmt.Errorf("can't parse float: %v", err)) + return 0 + } + + return floatVal +} + +// getStatValue gets property of stat object +func (l *statLoader) getStatValue(statName, propName string) string { + stat, ok := l.statMap[statName] + if !ok { + l.errors = append(l.errors, fmt.Errorf("stat %v doesn't exist", statName)) + return "" + } + prop, ok := stat.props[propName] + if !ok { + l.errors = append(l.errors, fmt.Errorf("stat %v doesn't have %v property", statName, propName)) + return "" + } + + return prop +} diff --git a/cmd/kava/opendb/stats_loader_test.go b/cmd/kava/opendb/stats_loader_test.go new file mode 100644 index 0000000000..a6652dad25 --- /dev/null +++ b/cmd/kava/opendb/stats_loader_test.go @@ -0,0 +1,80 @@ +//go:build rocksdb +// +build rocksdb + +package opendb + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestStatsLoader(t *testing.T) { + defaultStat := stat{ + props: map[string]string{ + "COUNT": "1", + }, + } + defaultHistogramStat := stat{ + props: map[string]string{ + "P50": "1", + "P95": "2", + "P99": "3", + "P100": "4", + "COUNT": "5", + "SUM": "6", + }, + } + defaultStatMap := map[string]*stat{ + "rocksdb.number.keys.written": &defaultStat, + "rocksdb.number.keys.read": &defaultStat, + "rocksdb.number.keys.updated": &defaultStat, + "rocksdb.block.cache.miss": &defaultStat, + "rocksdb.block.cache.hit": &defaultStat, + "rocksdb.block.cache.add": &defaultStat, + "rocksdb.block.cache.add.failures": &defaultStat, + "rocksdb.compact.read.bytes": &defaultStat, + "rocksdb.compact.write.bytes": &defaultStat, + "rocksdb.compaction.times.micros": &defaultHistogramStat, + "rocksdb.compaction.times.cpu_micros": &defaultHistogramStat, + "rocksdb.numfiles.in.singlecompaction": &defaultHistogramStat, + "rocksdb.read.amp.estimate.useful.bytes": &defaultStat, + 
"rocksdb.read.amp.total.read.bytes": &defaultStat, + "rocksdb.no.file.opens": &defaultStat, + "rocksdb.no.file.errors": &defaultStat, + "rocksdb.bloom.filter.useful": &defaultStat, + "rocksdb.bloom.filter.full.positive": &defaultStat, + "rocksdb.bloom.filter.full.true.positive": &defaultStat, + "rocksdb.memtable.hit": &defaultStat, + "rocksdb.memtable.miss": &defaultStat, + "rocksdb.l0.hit": &defaultStat, + "rocksdb.l1.hit": &defaultStat, + "rocksdb.l2andup.hit": &defaultStat, + "rocksdb.bytes.written": &defaultStat, + "rocksdb.bytes.read": &defaultStat, + "rocksdb.stall.micros": &defaultStat, + "rocksdb.last.level.read.bytes": &defaultStat, + "rocksdb.last.level.read.count": &defaultStat, + "rocksdb.non.last.level.read.bytes": &defaultStat, + "rocksdb.non.last.level.read.count": &defaultStat, + "rocksdb.db.get.micros": &defaultHistogramStat, + "rocksdb.db.write.micros": &defaultHistogramStat, + "rocksdb.bytes.per.read": &defaultHistogramStat, + "rocksdb.bytes.per.write": &defaultHistogramStat, + "rocksdb.bytes.per.multiget": &defaultHistogramStat, + "rocksdb.db.flush.micros": &defaultHistogramStat, + } + + statLoader := newStatLoader(defaultStatMap) + stats, err := statLoader.load() + require.NoError(t, err) + + require.Equal(t, stats.NumberKeysWritten, int64(1)) + require.Equal(t, stats.NumberKeysRead, int64(1)) + require.Equal(t, stats.CompactionTimesMicros.P50, float64(1)) + require.Equal(t, stats.CompactionTimesMicros.P95, float64(2)) + require.Equal(t, stats.CompactionTimesMicros.P99, float64(3)) + require.Equal(t, stats.CompactionTimesMicros.P100, float64(4)) + require.Equal(t, stats.CompactionTimesMicros.Count, float64(5)) + require.Equal(t, stats.CompactionTimesMicros.Sum, float64(6)) +} diff --git a/go.mod b/go.mod index a63b666236..17070e8bf0 100644 --- a/go.mod +++ b/go.mod @@ -14,10 +14,11 @@ require ( github.com/gorilla/mux v1.8.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/kava-labs/kava-bridge v0.2.0 + github.com/linxGnu/grocksdb v1.8.0 github.com/prometheus/client_golang v1.12.2 github.com/spf13/cast v1.5.0 github.com/spf13/cobra v1.5.0 - github.com/stretchr/testify v1.8.0 + github.com/stretchr/testify v1.8.3 github.com/tendermint/tendermint v0.34.21 github.com/tendermint/tm-db v0.6.7 github.com/tharsis/ethermint v0.14.0 @@ -98,7 +99,6 @@ require ( github.com/klauspost/compress v1.15.9 // indirect github.com/lib/pq v1.10.6 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect - github.com/linxGnu/grocksdb v1.7.1 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-isatty v0.0.14 // indirect @@ -131,7 +131,7 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/viper v1.12.0 // indirect github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969 // indirect - github.com/stretchr/objx v0.4.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect github.com/subosito/gotenv v1.4.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect github.com/tendermint/btcd v0.1.1 // indirect @@ -170,9 +170,9 @@ replace ( // Use the cosmos modified protobufs github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 // Use rocksdb 7.1.2 - github.com/tendermint/tm-db => github.com/kava-labs/tm-db v0.6.7-kava.1 + github.com/tendermint/tm-db => github.com/kava-labs/tm-db v0.6.7-kava.4 // Use ethermint fork that respects min-gas-price with NoBaseFee true and london enabled, newProto w/ height parameter - 
github.com/tharsis/ethermint => github.com/Kava-Labs/ethermint v0.14.0-kava-v18.2 + github.com/tharsis/ethermint => github.com/Kava-Labs/ethermint v0.14.0-kava-v18.3 // Make sure that we use grpc compatible with cosmos google.golang.org/grpc => google.golang.org/grpc v1.33.2 ) diff --git a/go.sum b/go.sum index 7f39098310..b6ea491656 100644 --- a/go.sum +++ b/go.sum @@ -63,8 +63,8 @@ github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/Kava-Labs/ethermint v0.14.0-kava-v18.2 h1:Im1LUDEQjicVQHbW/7ZGmnKOhwKCo2T4pf5oVrN8kAQ= -github.com/Kava-Labs/ethermint v0.14.0-kava-v18.2/go.mod h1:+/TXGhi6XwF9B8h2NrYZCKrrv0+pC3RkpM0950lYyJg= +github.com/Kava-Labs/ethermint v0.14.0-kava-v18.3 h1:wGjy9R2geEiAdPzGLEc2m2ZWf+awJIK6QkdLp/DFVM0= +github.com/Kava-Labs/ethermint v0.14.0-kava-v18.3/go.mod h1:+/TXGhi6XwF9B8h2NrYZCKrrv0+pC3RkpM0950lYyJg= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= @@ -570,8 +570,8 @@ github.com/kava-labs/cosmos-sdk v0.45.9-kava.1 h1:1oaPsDNuLT+0tZtoN0SkJg1LngNnm6 github.com/kava-labs/cosmos-sdk v0.45.9-kava.1/go.mod h1:Z5M4TX7PsHNHlF/1XanI2DIpORQ+Q/st7oaeufEjnvU= github.com/kava-labs/kava-bridge v0.2.0 h1:5t9AsPUy9Riwi+gRLgozhv0dut/xQcvRF+yIB+wLuo8= github.com/kava-labs/kava-bridge v0.2.0/go.mod h1:APqFV9V/rmkm3MnJ27R/CGOjteCX+3mVuRTYE70v7Yw= -github.com/kava-labs/tm-db v0.6.7-kava.1 h1:7cVYlvWx1yP+gGdaAWcfm6NwMLzf4z6DxXguWn3+O3w= -github.com/kava-labs/tm-db v0.6.7-kava.1/go.mod h1:HVZfZzWXuqWseXQVplxsWXK6kLHLkk3kQB6c+nuSZvk= +github.com/kava-labs/tm-db v0.6.7-kava.4 h1:M2RibOKmbi+k2OhAFry8z9+RJF0CYuDETB7/PrSdoro= +github.com/kava-labs/tm-db v0.6.7-kava.4/go.mod h1:70tpLhNfwCP64nAlq+bU+rOiVfWr3Nnju1D1nhGDGKs= github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d h1:Z+RDyXzjKE0i2sTjZ/b1uxiGtPhFy34Ou/Tk0qwN0kM= github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d/go.mod h1:JJNrCn9otv/2QP4D7SMJBgaleKpOf66PnW6F5WGNRIc= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -611,8 +611,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linxGnu/grocksdb v1.7.1 h1:KBdzX2OQ6tZcZglsRdBwZmGpwHTEb+VqXR5iLuh72+Q= -github.com/linxGnu/grocksdb v1.7.1/go.mod h1:Puj0cSlfTWTp9UdIBijNtNrudkMXXu4er2g+p+CvlJo= +github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE= +github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg= github.com/lucasjones/reggen v0.0.0-20180717132126-cdb49ff09d77/go.mod h1:5ELEyG+X8f+meRWHuqUOewBOhvHkl7M76pdGEansxW4= github.com/magiconair/properties v1.8.0/go.mod 
h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= @@ -881,8 +881,9 @@ github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5J github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -892,8 +893,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.4.0 h1:yAzM1+SmVcz5R4tXGsNMu1jUl2aOJXoiWUCEwwnGrvs= github.com/subosito/gotenv v1.4.0/go.mod h1:mZd6rFysKEcUhUHXJk0C/08wAgyDBFuwEYL7vWWGaGo= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM=
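Reviewer note: the snippet below is an illustrative sketch, not part of the patch. It shows how the unexported helpers added in cmd/kava/opendb (newStatLoader, stat, getFloat64HistogramStatValue, error) fit together, mirroring TestStatsLoader above. The stat map is hand-built because the parsing of the raw RocksDB statistics dump into stat objects happens elsewhere in this PR; the function name and the values used here are hypothetical.

//go:build rocksdb
// +build rocksdb

package opendb

import "fmt"

// exampleHistogramLookup is illustrative only: it feeds a hand-built stat map
// into the loader and reads a single histogram back out.
func exampleHistogramLookup() {
	loader := newStatLoader(map[string]*stat{
		"rocksdb.db.get.micros": {props: map[string]string{
			"SUM": "42", "COUNT": "7", "P50": "1", "P95": "2", "P99": "3", "P100": "4",
		}},
	})

	hist := loader.getFloat64HistogramStatValue("rocksdb.db.get.micros")
	if err := loader.error(); err != nil {
		fmt.Println("failed to read histogram:", err)
		return
	}

	// Sum/Count yields the mean latency tracked by the histogram: 42/7 = 6µs.
	fmt.Printf("avg get latency: %.2f µs\n", hist.Sum/hist.Count)
}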