Skip to content
This repository has been archived by the owner on Sep 30, 2023. It is now read-only.

node and browser benchmark runner #32

Merged
merged 98 commits into from
May 31, 2021
Merged
Show file tree
Hide file tree
Changes from 88 commits
Commits
Show all changes
98 commits
Select commit Hold shift + click to select a range
0df423e
fix: correct npm package name
tabcat Mar 15, 2021
25e2b94
expose gc as a function
tabcat Mar 15, 2021
f2395fb
install isNode
tabcat Mar 16, 2021
a1d5f4c
add src/benchmarker.js
tabcat Mar 16, 2021
a082984
add needed bind for timeout callback
tabcat Mar 16, 2021
c8fe355
rework benchmarker into http client/server with cli
tabcat Mar 17, 2021
67cf5f8
add commander to dependencies
tabcat Mar 17, 2021
2846f25
fix program options usage
tabcat Mar 17, 2021
3f35268
fix cli ls exec
tabcat Mar 17, 2021
d4b06a0
make child process exec async
tabcat Mar 17, 2021
b2fc619
use node-fetch instead of whatwg-fetch polyfill
tabcat Mar 17, 2021
b52eeb2
change benchmarker http-client addMetric api
tabcat Mar 17, 2021
ba0ee13
fix benchmarker server create and _handleResults
tabcat Mar 17, 2021
9b6ea3c
logging over websockets
tabcat Mar 18, 2021
baeb965
remove unneeded timeout
tabcat Mar 18, 2021
26b068c
write result json files to results folder
tabcat Mar 18, 2021
94a6571
remove whatwg-fetch polyfill
tabcat Mar 18, 2021
2a3f148
benchmarker better logging
tabcat Mar 18, 2021
3c3a1d3
results over websockets
tabcat Mar 19, 2021
c4a2a33
error logging and use stdout instead of console.log
tabcat Mar 19, 2021
045cc37
remove http from benchmarker server
tabcat Mar 19, 2021
7fe4493
prep for browser support
tabcat Mar 19, 2021
ad392e3
install webpack middleware and express
Mar 19, 2021
a1cd244
install html-webpack-plugin
Mar 19, 2021
9db9d06
install val-loader
Mar 20, 2021
7856bb1
downgrade webpack to v4
Mar 20, 2021
48430e4
downgrade html-webpack-plugin to support webpack4
Mar 20, 2021
e30a7b8
downgrade val-loader
Mar 20, 2021
fa7554b
add newlines for cancel/complete status
Mar 20, 2021
62d9c7a
manual browser support (no puppeteer yet)
Mar 20, 2021
1192ee3
automate browser benchmarks with puppeteer
tabcat Mar 20, 2021
6d64605
removed uneeded promisify
tabcat Mar 21, 2021
04ad0b0
use fork over exec in benchmarker cli
tabcat Mar 21, 2021
2338cd4
move webpack to its own thread
tabcat Mar 21, 2021
d0ad874
ensure split works for large ls return
tabcat Mar 22, 2021
a5cc0e0
cleanup and minor changes
tabcat Mar 22, 2021
ac7b3c4
use open ports, remove port argument
tabcat Mar 22, 2021
5bfa43b
install ws websocket server
tabcat Mar 22, 2021
d03d3d2
create results dir path on results
tabcat Mar 22, 2021
8699a66
move server variable into runBenchmarks
tabcat Mar 22, 2021
3cbe290
fix typo
tabcat Mar 22, 2021
34c5e26
add bundling... message
tabcat Mar 22, 2021
8d20f0c
use console.log for log messages
tabcat Mar 23, 2021
25a2d22
fix window.performance.memory accuracy
tabcat Mar 24, 2021
dd69391
use already opened browser page
tabcat Mar 24, 2021
59d5e8a
add puppeteer to deps
tabcat Mar 31, 2021
9903131
move default metrics to separate file
tabcat Mar 31, 2021
e5fc31a
get ready to build fixtures
tabcat Apr 1, 2021
3a0745f
edit package deps and npm audit fix
tabcat Apr 6, 2021
76385c1
ignore fixtures dir
tabcat Apr 6, 2021
e4107e9
refactor benchmarker; all working but reports
tabcat Apr 6, 2021
cf77394
remove old benchmark runner files
tabcat Apr 6, 2021
cf5556b
edit deps; commit package and package-lock
tabcat Apr 9, 2021
92ada32
base working
tabcat Apr 9, 2021
2278a9f
add log-load benchmark
tabcat Apr 9, 2021
7c3d93c
change benchmark setting
tabcat Apr 9, 2021
bb7a78a
use execBenchmarkPath variable
tabcat Apr 12, 2021
d3fd012
stop using fixtures
tabcat Apr 14, 2021
2e5b35e
more report outputs
tabcat Apr 14, 2021
2e7df88
add ordered benchmarks
tabcat Apr 14, 2021
2b1035f
add process-results and report util file
tabcat Apr 14, 2021
bc6707c
get percent change for time metric
tabcat Apr 14, 2021
bc13cb3
remove getLabel from process-results
tabcat Apr 14, 2021
3e98fa4
make dir for output path
tabcat Apr 14, 2021
6b8b2e9
no fixtures or hard coded port; small cleanup
tabcat Apr 14, 2021
f204d33
move Report component to parent dir
tabcat Apr 14, 2021
91015d8
change option order
tabcat Apr 14, 2021
c8a89e2
optionally track mem/cpu
tabcat Apr 19, 2021
f36bb87
benchmarks path param for cli
tabcat Apr 19, 2021
c229048
add catch to webpackServer call
tabcat Apr 19, 2021
9f5ec91
add benchmarking... console message
tabcat Apr 19, 2021
9adb611
fix webpack-server
tabcat Apr 19, 2021
0bb9301
remove local benchmarks
tabcat Apr 20, 2021
490450e
no written output by default
tabcat Apr 20, 2021
9aae43b
remove --no-output option
tabcat Apr 20, 2021
f2af8e6
fix avg processed metric
tabcat Apr 21, 2021
37654bf
change benchmarker server variable name
tabcat Apr 21, 2021
59d1be5
reuse webpack port for indexedDb
tabcat Apr 21, 2021
282d227
change baselines option flag
tabcat Apr 21, 2021
0264cd7
reword opt description; change -b default
tabcat Apr 22, 2021
0935a9c
add basic usage to README.md
tabcat Apr 22, 2021
9696217
remove fixtures from gitignore
tabcat Apr 26, 2021
c22f492
remove tests for now
tabcat Apr 26, 2021
bdca18b
remove runPlace leftover from run.js
tabcat Apr 29, 2021
47b9f5e
browser name property for execBenchmarks
tabcat Apr 29, 2021
379a9b5
static webpack port
tabcat Apr 29, 2021
1dcbdec
make reporter/process-results.js more readable
tabcat Apr 29, 2021
4e247fa
fix: webpack-entry use run func again
tabcat Apr 30, 2021
0ee6f76
change tempdir name for benchmark runner
tabcat May 2, 2021
d36133e
remove outdated comment
tabcat May 2, 2021
ea6bd94
small style edit cli.js
tabcat May 2, 2021
149fab5
check for baseline path exist
tabcat May 3, 2021
cbaed0f
add baseline comparison example
tabcat May 3, 2021
3aa8176
add basic end2end and cli option tests
tabcat May 3, 2021
4986962
add docs on creating benchmarks
tabcat May 3, 2021
87981de
add 30 sec timeout to tests
tabcat May 3, 2021
94a882b
support benchmark hooks
tabcat May 5, 2021
80ac3ae
console report spacing and negative array length
tabcat May 5, 2021
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 13 additions & 7 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,17 +6,23 @@

## Install

`npm i benchmark-runner`
`npm i orbit-db-benchmark-runner`

## Usage
## CLI Usage

TBD
Check [cli.js](./src/cli.js) or use `npx benchmarker -h` for help

## Testing
```
Options:
-V, --version output the version number
-b, --benchmarks <path> benchmark folder or file (default: "./benchmarks")
-o, --output <file path> report output path (.html or .json)
-i, --baselines <path> baselines to use for comparison (.json output)
--no-node skip nodejs benchmarks
--no-browser skip browser benchmarks
```

Mocha is used as the testing framework, SinonJS for stubs and mocks and ChaiJS for assertions. To run tests:

`npm run test`
**benchmarks run for comparison are best run on a dedicated machine, or one with few other processes running in the background**

## Contributing

Expand Down
20,013 changes: 17,291 additions & 2,722 deletions package-lock.json

Large diffs are not rendered by default.

42 changes: 29 additions & 13 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -4,16 +4,39 @@
"description": "OrbitDB Benchmark Runner",
"main": "./src/index.js",
"bin": {
"benchmark-runner": "./src/cli.js"
},
"scripts": {
"test": "nyc mocha"
"benchmarker": "./src/cli.js"
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should we change the name of the repo to orbit-db-benchmarker? Thinking about consistency.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this could be changed to anything or kept the same, i chose the different name originally so it didnt conflict. probably good to have the package and repo the same name though (they are different right now; repo: benchmark-runner, package: orbit-db-benchmark-runner). shorter name probably better. also keep in mind the benchmarker doesnt have anything written in that makes it specific to benchmarking orbitdb, it can run benchmarks for anything.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Alright let's just leave it as is for now until it becomes a real problem

},
"scripts": {},
"author": "mistakia",
"license": "MIT",
"dependencies": {
"expose-gc": "^1.0.0",
"yargs": "^15.4.1"
"@babel/core": "^7.13.10",
"@babel/preset-env": "^7.13.12",
"@babel/preset-react": "^7.12.13",
"@nivo/core": "^0.67.0",
"@nivo/line": "^0.67.0",
"babel-loader": "^8.2.2",
"bootstrap": "^4.6.0",
"commander": "^7.1.0",
"css-loader": "^5.1.3",
"express": "^4.17.1",
"html-webpack-plugin": "^4.5.2",
"inline-assets-html-plugin": "^1.0.0",
"is-node": "^1.0.2",
"puppeteer": "^8.0.0",
"react": "^17.0.2",
"react-bootstrap": "^1.5.2",
"react-dom": "^17.0.2",
"style-loader": "^2.0.0",
"val-loader": "^2.1.2",
"webpack": "^4.46.0",
"webpack-dev-middleware": "^4.1.0",
"ws": "^7.4.4"
},
"devDependencies": {
"ipfs": "^0.54.4",
"orbit-db": "^0.26.1",
"standard": "^14.3.4"
},
"localMaintainers": [
"hajamark <[email protected]>",
Expand All @@ -32,13 +55,6 @@
"homepage": "https://github.com/orbitdb/benchmark-runner#readme",
"bugs": "https://github.com/orbitdb/benchmark-runner/issues",
"repository": "github:orbitdb/benchmark-runner",
"devDependencies": {
"chai": "^4.2.0",
"mocha": "^8.1.3",
"nyc": "^15.1.0",
"sinon": "^9.0.3",
"standard": "^14.3.4"
},
"standard": {
"env": "mocha"
}
Expand Down
116 changes: 116 additions & 0 deletions src/benchmarker/client.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
'use strict'
const isNode = require('is-node')
// in node, benchmark working files live in a 'node' subdirectory of dir
const nodeDir = (dir) => require('path').join(dir, 'node')
// use the ws package under node; the native WebSocket in the browser
const getWebSocket = () => isNode
  ? require('ws')
  : window.WebSocket
const { makeId, withInfo, creators } = require('./ws-action')
const {
timeMetric,
cpuUsageMetric,
memoryUsedMetric,
memoryTotalMetric
} = require('./metrics')

/**
 * Benchmark client: connects to a BenchmarkerServer over a websocket,
 * records metric samples on an interval, and streams logs/segments to it.
 */
class Benchmarker {
  /**
   * @param {WebSocket} ws - an already-open websocket to the server
   * @param {string} dir - working directory for the benchmark; under node
   *   a 'node' subdirectory is used so node and browser runs don't collide
   */
  constructor (ws, dir) {
    this._ws = ws
    this.dir = isNode ? nodeDir(dir) : dir
    this._timeout = null // non-null while metrics are being recorded

    this.isNode = isNode
    this.id = makeId()
    this.info = {
      id: this.id,
      name: `benchmark-${this.id}`,
      env: isNode ? 'node' : 'browser',
      metrics: []
    }
    this._interval = 1000 // record metrics every this many ms

    this.metrics = []
    this.addMetric(timeMetric) // the time metric is always recorded
  }

  /**
   * Connect to a benchmarker server and resolve with a ready Benchmarker.
   * @param {string} host - host (and optional port) of the websocket server
   * @param {string} dir - working directory for the benchmark
   * @returns {Promise<Benchmarker>}
   */
  static async create (host, dir) {
    const ws = await new Promise(resolve => {
      const ws = new (getWebSocket())(`ws://${host}`)
      ws.onopen = () => resolve(ws)
    })
    return new Benchmarker(ws, dir)
  }

  // Close the websocket unless already closed; readyState 3 === CLOSED
  // per the WHATWG WebSocket spec (also used by the ws package).
  async close () {
    if (this._ws.readyState !== 3) {
      await new Promise(resolve => {
        this._ws.onclose = () => resolve()
        this._ws.close()
      })
    }
  }

  // Opt in to heap-usage metrics (works in node and browser).
  trackMemory () {
    this.addMetric(memoryUsedMetric)
    this.addMetric(memoryTotalMetric)
  }

  // Opt in to cpu-usage metrics; only available under node.
  trackCpu () {
    if (isNode) this.addMetric(cpuUsageMetric)
  }

  /**
   * Register a metric to sample on each recording tick.
   * Must be called before startRecording(); names must be unique.
   * @param {{name: string, get: function(): *}} metric
   * @throws {Error} on duplicate name or if recording has started
   */
  addMetric ({ name, get }) {
    if (this.info.metrics.includes(name)) {
      throw new Error('a metric with that name already exists')
    }
    if (this._timeout) {
      throw new Error('metrics have already started being recorded')
    }
    this.metrics.push({ name, get })
    this.info.metrics = this.metrics.map(m => m.name)
  }

  /**
   * Change the sampling interval (ms). Must be set before recording starts.
   * @param {number} interval
   * @throws {Error} if interval is not a number or recording has started
   */
  setInterval (interval) {
    if (typeof interval !== 'number') {
      throw new Error('interval must be a number')
    }
    if (this._timeout) {
      throw new Error('metrics have already started being recorded')
    }
    this._interval = interval
  }

  // Set the benchmark's display name used to group results on the server.
  setBenchmarkName (name) {
    this.info.name = name.toString()
  }

  // Send a log line to the server (echoed to its console).
  log (msg) {
    this._sendAction(creators.LOG(msg))
  }

  // Serialize an action, tagged with this benchmark's info, onto the socket.
  _sendAction (action) {
    this._ws.send(JSON.stringify(withInfo(this.info)(action)))
  }

  // Sample every registered metric and send the values as one segment.
  _recordMetrics () {
    this._sendAction(creators.SEGMENT(this.metrics.map(({ get }) => get())))
  }

  // Begin sampling metrics every this._interval ms; no-op if already running.
  startRecording () {
    if (!this._timeout) {
      const interval = this._interval
      // repeater is an arrow function, so `this` is lexically bound —
      // the previous `.bind(this)` on it was a no-op and has been removed
      const repeater = () => {
        this._recordMetrics()
        this._timeout = setTimeout(repeater, interval)
      }
      repeater()
    }
  }

  // Stop sampling and flush one final segment of metric values.
  stopRecording () {
    clearTimeout(this._timeout)
    this._timeout = null
    this._recordMetrics()
  }
}

module.exports = Benchmarker
71 changes: 71 additions & 0 deletions src/benchmarker/metrics/index.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
'use strict'
const isNode = require('is-node')
// Wrap a stateful metric getter: each call feeds the previous state to
// `get`, remembers the returned `newState`, and yields `next` as the sample.
const useMetricState = (state, get) => {
  return () => {
    const result = get(state)
    state = result.newState
    return result.next
  }
}

// Elapsed-time metric: the first sample is 0; every later sample is the
// number of milliseconds since that first sample was taken (the first
// timestamp is latched into state by `state || now` and never replaced).
const timeMetric = {
  name: 'time',
  get: useMetricState(0, (state) => {
    const now = Date.now()
    return {
      newState: state || now, // latch the first sample's timestamp
      next: now - (state || now) // on first metric sample: now - now, aka 0
    }
  })
}

// process.cpuUsage() reports user/system time in MICROSECONDS, so /1000
// converts to milliseconds. The helper was previously misnamed `ns2ms`
// (nanoseconds→ms would divide by 1e6); renamed to match what it does.
// It is module-private (not exported), so the rename is caller-safe.
const microsToMillis = (micros) => micros / 1000
// Percent cpu used since the previous sample; 0 on the first sample.
const cpuUsageMetric = {
  name: 'cpu usage',
  get: useMetricState(undefined, (state) => {
    const time = Date.now()
    const { user, system } = process.cpuUsage()
    const total = microsToMillis(user) + microsToMillis(system)
    return {
      newState: { total, time },
      next: state
        // cpu-ms consumed over wall-clock-ms elapsed, as a percentage
        ? Math.round(100 * ((total - state.total) / (time - state.time)))
        : 0
    }
  })
}

// Sample current JS heap usage, normalized across node and browser.
// Returns { total, used } denominated in bytes.
const memorySample = () => {
  if (isNode) {
    const { heapTotal, heapUsed } = process.memoryUsage()
    return { total: heapTotal, used: heapUsed }
  }
  const { totalJSHeapSize, usedJSHeapSize } = window.performance.memory
  return { total: totalJSHeapSize, used: usedJSHeapSize }
}
// convert bytes to megabytes (decimal: 1 MB = 1,000,000 bytes)
const toMegabytes = (bytes) => bytes / 1000000
// JS heap currently in use, in MB
const memoryUsedMetric = {
  name: 'heap used',
  get: () => toMegabytes(memorySample().used)
}
// total allocated JS heap, in MB
const memoryTotalMetric = {
  name: 'heap total',
  get: () => toMegabytes(memorySample().total)
}

// useMetricState is exported so consumers can build custom stateful metrics
module.exports = {
  useMetricState,
  timeMetric,
  cpuUsageMetric,
  memoryUsedMetric,
  memoryTotalMetric
}
39 changes: 39 additions & 0 deletions src/benchmarker/server.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
'use strict'
const WebSocket = require('ws')
const { parse, types } = require('./ws-action')
// Format a client log entry: a header line carrying the benchmark id,
// then the message itself, then a trailing newline.
const logMessage = (id, msg) => `benchmark id:${id}\n${msg}\n`

/**
 * Websocket server that collects logs and metric segments from
 * Benchmarker clients, aggregated by benchmark name and environment.
 */
class BenchmarkerServer {
  /**
   * @param {object} [options]
   * @param {number} [options.port] - listen port; defaults to 0, which
   *   picks a free port (read it back via .address().port)
   */
  constructor ({ port } = {}) {
    this._wss = new WebSocket.Server({ port: port || 0 })
    this._wss.on('connection', this._handleWsConnection.bind(this))
    // expose the underlying server's address() so callers can read the port
    this.address = this._wss.address.bind(this._wss)
    // results[name][env] = client info object plus a `recorded` array of segments
    this.results = {}
  }

  // Factory mirroring Benchmarker.create on the client side.
  static create (opts) { return new BenchmarkerServer(opts) }

  // Handle one client connection: LOG actions are echoed to the console;
  // SEGMENT actions accumulate under results[name][env].recorded.
  // Unknown action types are ignored. (Previously declared `async` despite
  // containing no await — the keyword was misleading and has been removed.)
  _handleWsConnection (ws) {
    ws.on('message', m => {
      const { info, type, msg } = parse(m)
      switch (type) {
        case types.LOG:
          console.log(logMessage(info.id, msg))
          break
        case types.SEGMENT: {
          const { name, env } = info
          if (!this.results[name]) this.results[name] = {}
          // first segment for this name/env seeds the entry with the client info
          if (!this.results[name][env]) this.results[name][env] = info
          if (!this.results[name][env].recorded) this.results[name][env].recorded = []
          this.results[name][env].recorded.push(msg)
          break
        }
      }
    })
  }
}

module.exports = BenchmarkerServer
22 changes: 22 additions & 0 deletions src/benchmarker/ws-action.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
'use strict'

// Wire-format helpers for the websocket messages exchanged between the
// benchmarker client and server.
const action = {}

// Message types.
action.types = {
  LOG: 'LOG',
  SEGMENT: 'SEGMENT'
}

// Action creators: build the plain objects sent over the socket.
action.creators = {
  [action.types.LOG]: (msg) =>
    ({ type: action.types.LOG, msg }),
  [action.types.SEGMENT]: (msg) =>
    ({ type: action.types.SEGMENT, msg })
}

// NOTE(review): Date.now() ids can collide if two benchmarkers start in
// the same millisecond; consider adding a random suffix if that matters.
action.makeId = () => Date.now()

// Tag an action with the benchmark's info object. (Parameter renamed from
// `action`, which shadowed the module-level `action` object above.)
action.withInfo = (info) => (act) => ({ info, ...act })

// Parse a raw message string back into an action. (Parameter renamed from
// `action` for the same shadowing reason.)
action.parse = (raw) => JSON.parse(raw)

// single shared export: types, creators, makeId, withInfo, parse
module.exports = action
Loading