diff --git a/.github/workflows/benchmark-comparison.yml b/.github/workflows/benchmark-comparison.yml
new file mode 100644
index 000000000..7ec886ae2
--- /dev/null
+++ b/.github/workflows/benchmark-comparison.yml
@@ -0,0 +1,55 @@
+name: Benchmark comparison
+on:
+  workflow_dispatch:
+    inputs:
+      bench:
+        description: 'Benchmarks to run'
+        required: false
+        default: '.'
+      parallelism:
+        description: 'Number of parallel benchmarks to run'
+        required: false
+        default: 5
+      duration:
+        description: 'Duration of each benchmark'
+        required: false
+        default: '10s'
+      count:
+        description: 'Number of times to run each benchmark'
+        required: false
+        default: 1
+  pull_request:
+    types: [ assigned, opened, synchronize, reopened, labeled ]
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
+  cancel-in-progress: true
+
+jobs:
+  BenchmarkCompare:
+    runs-on: "github-001"
+    if: contains(github.event.pull_request.labels.*.name, 'benchmarks')
+    steps:
+      - uses: 'actions/checkout@v4'
+        with:
+          fetch-depth: 0
+      - name: Setup Env
+        uses: ./.github/actions/env
+        with:
+          token: ${{ secrets.NUMARY_GITHUB_TOKEN }}
+      - run: >
+          /nix/var/nix/profiles/default/bin/nix --extra-experimental-features "nix-command" --extra-experimental-features "flakes"
+          develop --impure --command just
+          --justfile ./test/performance/justfile
+          --working-directory ./test/performance
+          compare ${{ inputs.bench }} ${{ inputs.parallelism }} ${{ inputs.duration }} ${{ inputs.count }}
+      - run: >
+          /nix/var/nix/profiles/default/bin/nix --extra-experimental-features "nix-command" --extra-experimental-features "flakes"
+          develop --impure --command just
+          --justfile ./test/performance/justfile
+          --working-directory ./test/performance
+          graphs
+      - uses: actions/upload-artifact@v4
+        with:
+          name: graphs
+          path: test/performance/report
\ No newline at end of file
diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
index 728717115..3f9d82df3 100644
--- a/.github/workflows/benchmark.yml
+++ b/.github/workflows/benchmark.yml
@@ -1,43 +1,48 @@
 name: Benchmark
 on:
   workflow_dispatch:
-  pull_request:
-    types: [ assigned, opened, synchronize, reopened, labeled ]
+    inputs:
+      bench:
+        description: 'Benchmarks to run'
+        required: false
+        default: '.'
+      parallelism:
+        description: 'Number of parallel benchmarks to run'
+        required: false
+        default: 5
+      duration:
+        description: 'Duration of each benchmark'
+        required: false
+        default: '10s'
 
 concurrency:
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
 
 jobs:
   Benchmark:
     runs-on: "github-001"
-    if: contains(github.event.pull_request.labels.*.name, 'benchmarks') || github.ref == 'refs/heads/main'
+    if: github.ref == 'refs/heads/main'
     steps:
       - uses: 'actions/checkout@v4'
         with:
           fetch-depth: 0
-      - run: go build -o /tmp/ledger ./
-      - run: echo "running actions as ${USER}"
-      - run: >
-          /tmp/ledger serve
-          --postgres-uri=postgres://formance:formance@127.0.0.1/ledger
-          --postgres-conn-max-idle-time=120s
-          --postgres-max-open-conns=500
-          --postgres-max-idle-conns=100
-          --experimental-features
-          --otel-metrics-keep-in-memory &
+      - name: Setup Env
+        uses: ./.github/actions/env
+        with:
+          token: ${{ secrets.NUMARY_GITHUB_TOKEN }}
       - run: >
-          earthly
-          --allow-privileged
-          ${{ contains(github.event.pull_request.labels.*.name, 'no-cache') && '--no-cache' || '' }}
-          ./test/performance+run --args="-benchtime 10s --ledger.url=http://localhost:3068 --parallelism=5" --locally=yes
+          /nix/var/nix/profiles/default/bin/nix --extra-experimental-features "nix-command" --extra-experimental-features "flakes"
+          develop --impure --command just
+          --justfile ./test/performance/justfile
+          --working-directory ./test/performance
+          run ${{ inputs.bench }} ${{ inputs.parallelism }} ${{ inputs.duration }} 1
       - run: >
-          earthly
-          --allow-privileged
-          ${{ contains(github.event.pull_request.labels.*.name, 'no-cache') && '--no-cache' || '' }}
-          ./test/performance+generate-graphs
-      - run: kill -9 $(ps aux | grep "ledger serve"| grep -v "grep" | awk '{print $2}')
-        if: always()
+          /nix/var/nix/profiles/default/bin/nix --extra-experimental-features "nix-command" --extra-experimental-features "flakes"
+          develop --impure --command just
+          --justfile ./test/performance/justfile
+          --working-directory ./test/performance
+          graphs
       - uses: actions/upload-artifact@v4
         with:
           name: graphs
diff --git a/flake.nix b/flake.nix
index ce3863d9a..daa189554 100644
--- a/flake.nix
+++ b/flake.nix
@@ -101,6 +101,7 @@
             just
             nodejs_22
             self.packages.${system}.speakeasy
+            goperf
           ];
         };
       }
diff --git a/test/performance/Earthfile b/test/performance/Earthfile
deleted file mode 100644
index 20b0969fe..000000000
--- a/test/performance/Earthfile
+++ /dev/null
@@ -1,59 +0,0 @@
-VERSION 0.8
-
-IMPORT github.com/formancehq/earthly:tags/v0.16.2 AS core
-
-run:
-    LOCALLY
-    ARG locally=no
-    ARG args="-bench=."
-
-    IF [ $locally == "yes" ]
-        RUN rm -f ./report/benchmark-output.txt
-        RUN go test -run ^$ -tags it,local -report.file ./report/report.json -timeout 60m $args . | tee -a ./report/benchmark-output.txt
-    ELSE
-        FROM ../..+tidy
-
-        ARG GOMAXPROCS=2
-        ARG GOMEMLIMIT=1024MiB
-
-        CACHE --id go-mod-cache /go/pkg/mod
-        CACHE --id go-cache /root/.cache/go-build
-        COPY *.go test/performance
-        COPY --dir scripts test/performance/
-
-        WORKDIR test/performance
-        RUN mkdir -p report
-
-        WITH DOCKER --load=postgres:15-alpine=../../+postgres
-            RUN go test -run ^$ -tags it,local -report.file ./report/report.json -timeout 60m $args . | tee ./report/benchmark-output.txt
-        END
-
-        SAVE ARTIFACT ./report/report.json
-        SAVE ARTIFACT ./report/benchmark-output.txt
-    END
-
-compare:
-    FROM core+builder-image
-    CACHE --id go-mod-cache /go/pkg/mod
-    CACHE --id go-cache /root/.cache/go-build
-    RUN go install golang.org/x/perf/cmd/benchstat@latest
-    ARG args="-bench=."
-    ARG rev=main
-
-    COPY (+run/benchmark-output.txt --args=$args) /report/benchmark-output-local.txt
-    COPY --allow-privileged (github.com/formancehq/ledger/test/performance:${rev}+run/benchmark-output.txt --args=$args) /report/benchmark-output-remote.txt
-
-    RUN benchstat /report/benchmark-output-remote.txt /report/benchmark-output-local.txt > benchmark-comparison.txt
-
-    SAVE ARTIFACT benchmark-comparison.txt AS LOCAL benchmark-comparison.txt
-
-generate-graphs:
-    FROM core+base-image
-    RUN apk update && apk add nodejs npm
-    COPY charts /src
-    COPY ./report/report.json /report/report.json
-    WORKDIR /src
-    RUN npm install
-    RUN npm run build
-    RUN node index.js
-    SAVE ARTIFACT *.png AS LOCAL ./report/
diff --git a/test/performance/README.md b/test/performance/README.md
index 8aef6bbaf..890624630 100644
--- a/test/performance/README.md
+++ b/test/performance/README.md
@@ -10,25 +10,25 @@ Scripts can be found in directory [scripts](./scripts).
 ## Run locally
 
 ```shell
-earthly +run
+just run
 ```
 
 You can pass additional arguments (the underlying command is a standard `go test -bench=.`) using the flag `--args`. For example:
 ```shell
-earthly +run --args="-benchtime 10s"
+just run "-benchtime 10s"
 ```
 
 ## Run on a remote stack
 
 ```shell
-earthly +run --args="--stack.url=XXX --client.id=XXX --client.secret=XXX"
+just run "--stack.url=XXX --client.id=XXX --client.secret=XXX"
 ```
 
 ## Run on a remote ledger
 
 ```shell
-earthly +run --args="--ledger.url=XXX --auth.url=XXX --client.id=XXX --client.secret=XXX"
+just run "--ledger.url=XXX --auth.url=XXX --client.id=XXX --client.secret=XXX"
 ```
 
 ## Results
 
@@ -37,7 +37,7 @@ TPS is included as a benchmark metrics.
 You can generate some graphs using the command:
 
 ```
-earthly +generate-graphs
+just graphs
 ```
 
 See generated files in `report` directory.
\ No newline at end of file
diff --git a/test/performance/charts/.gitignore b/test/performance/charts/.gitignore
new file mode 100644
index 000000000..a6c7c2852
--- /dev/null
+++ b/test/performance/charts/.gitignore
@@ -0,0 +1 @@
+*.js
diff --git a/test/performance/charts/index.js b/test/performance/charts/index.js
deleted file mode 100644
index cf68fabac..000000000
--- a/test/performance/charts/index.js
+++ /dev/null
@@ -1,51 +0,0 @@
-"use strict";
-var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
-    if (k2 === undefined) k2 = k;
-    var desc = Object.getOwnPropertyDescriptor(m, k);
-    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
-      desc = { enumerable: true, get: function() { return m[k]; } };
-    }
-    Object.defineProperty(o, k2, desc);
-}) : (function(o, m, k, k2) {
-    if (k2 === undefined) k2 = k;
-    o[k2] = m[k];
-}));
-var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
-    Object.defineProperty(o, "default", { enumerable: true, value: v });
-}) : function(o, v) {
-    o["default"] = v;
-});
-var __importStar = (this && this.__importStar) || function (mod) {
-    if (mod && mod.__esModule) return mod;
-    var result = {};
-    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
-    __setModuleDefault(result, mod);
-    return result;
-};
-var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
-    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
-    return new (P || (P = Promise))(function (resolve, reject) {
-        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
-        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
-        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
-        step((generator = generator.apply(thisArg, _arguments || [])).next());
-    });
-};
-Object.defineProperty(exports, "__esModule", { value: true });
-const fs = __importStar(require("fs"));
-require("chartjs-to-image");
-const graphs_1 = require("./src/graphs");
-const main = () => __awaiter(void 0, void 0, void 0, function* () {
-    let buffer = fs.readFileSync('../report/report.json', 'utf-8');
-    let reports = JSON.parse(buffer);
-    yield (0, graphs_1.exportTPSGraph)({
-        output: 'tps.png',
-    }, reports);
-    yield (0, graphs_1.exportLatencyGraph)({
-        output: 'p99.png'
-    }, 'P99', reports);
-    yield (0, graphs_1.exportLatencyGraph)({
-        output: 'p95.png'
-    }, 'P95', reports);
-});
-main();
diff --git a/test/performance/charts/index.ts b/test/performance/charts/index.ts
index e5ac02f89..ba4a566f6 100644
--- a/test/performance/charts/index.ts
+++ b/test/performance/charts/index.ts
@@ -6,15 +6,15 @@ const main = async () => {
     let buffer = fs.readFileSync('../report/report.json', 'utf-8');
     let reports = JSON.parse(buffer);
     await exportTPSGraph({
-        output: 'tps.png',
+        output: '../report/tps.png',
     }, reports);
 
-    await exportDatabaseStats('database_connections.png', reports);
+    await exportDatabaseStats('../report/database_connections.png', reports);
 
     const ps: (keyof MetricsTime)[] = ['P99', 'P95', 'P75', 'Avg']
     for (let p of ps) {
         await exportLatencyGraph({
-            output: p.toLowerCase() + '.png'
+            output: '../report/' + p.toLowerCase() + '.png'
         }, p, reports);
     }
 }
diff --git a/test/performance/charts/src/colors.js b/test/performance/charts/src/colors.js
deleted file mode 100644
index a66518a80..000000000
--- a/test/performance/charts/src/colors.js
+++ /dev/null
@@ -1,21 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.NAMED_COLORS = exports.CHART_COLORS = void 0;
-exports.CHART_COLORS = {
-    red: 'rgb(255, 99, 132)',
-    orange: 'rgb(255, 159, 64)',
-    yellow: 'rgb(255, 205, 86)',
-    green: 'rgb(75, 192, 192)',
-    blue: 'rgb(54, 162, 235)',
-    purple: 'rgb(153, 102, 255)',
-    grey: 'rgb(201, 203, 207)'
-};
-exports.NAMED_COLORS = [
-    exports.CHART_COLORS.red,
-    exports.CHART_COLORS.orange,
-    exports.CHART_COLORS.yellow,
-    exports.CHART_COLORS.green,
-    exports.CHART_COLORS.blue,
-    exports.CHART_COLORS.purple,
-    exports.CHART_COLORS.grey,
-];
diff --git a/test/performance/charts/src/graphs.js b/test/performance/charts/src/graphs.js
deleted file mode 100644
index a38670561..000000000
--- a/test/performance/charts/src/graphs.js
+++ /dev/null
@@ -1,114 +0,0 @@
-"use strict"; -var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { - function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } - return new (P || (P = Promise))(function (resolve, reject) { - function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } - function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } - function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } - step((generator = generator.apply(thisArg, _arguments || [])).next()); - }); -}; -var __importDefault = (this && this.__importDefault) || function (mod) { - return (mod && mod.__esModule) ? mod : { "default": mod }; -}; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.exportLatencyGraph = exports.exportTPSGraph = void 0; -const colors_1 = require("./colors"); -const chartjs_to_image_1 = __importDefault(require("chartjs-to-image")); -const exportTPSGraph = (configuration, result) => __awaiter(void 0, void 0, void 0, function* () { - const scripts = []; - for (let script in result) { - scripts.push(script); - } - const reportsForAnyScript = result[scripts[0]]; - if (!reportsForAnyScript) { - throw new Error("no data"); - } - const datasets = scripts.map(((script, index) => { - return { - label: script, - data: result[script].map(r => r.tps), - backgroundColor: colors_1.NAMED_COLORS[index % scripts.length], - }; - })); - const config = { - type: 'bar', - data: { - labels: reportsForAnyScript - .map(r => r.configuration.name), - datasets: datasets - }, - options: { - plugins: { - title: { - display: true, - text: 'TPS' - }, - }, - responsive: true, - interaction: { - intersect: false, - }, - scales: { - x: { - stacked: true, - }, - y: { - stacked: true - } - } - } - }; - const chart = new chartjs_to_image_1.default(); - chart.setConfig(config); - yield chart.toFile(configuration.output); -}); -exports.exportTPSGraph = exportTPSGraph; -const exportLatencyGraph = (configuration, key, result) => __awaiter(void 0, void 0, void 0, function* () { - const scripts = []; - for (let script in result) { - scripts.push(script); - } - const reportsForAnyScript = result[scripts[0]]; - if (!reportsForAnyScript) { - throw new Error("no data"); - } - const datasets = scripts.map(((script, index) => { - return { - label: script, - data: result[script].map(r => r.metrics.Time[key].substring(0, r.metrics.Time[key].length - 2)), - backgroundColor: colors_1.NAMED_COLORS[index % scripts.length], - }; - })); - const config = { - type: 'bar', - data: { - labels: reportsForAnyScript - .map(r => r.configuration.name), - datasets: datasets - }, - options: { - plugins: { - title: { - display: true, - text: 'TPS' - }, - }, - interaction: { - intersect: false, - }, - scales: { - x: { - stacked: true, - }, - y: { - stacked: true - } - } - } - }; - const chart = new chartjs_to_image_1.default(); - chart.setConfig(config); - yield chart.toFile(configuration.output); -}); -exports.exportLatencyGraph = exportLatencyGraph; diff --git a/test/performance/charts/src/report.js b/test/performance/charts/src/report.js deleted file mode 100644 index 3918c74e4..000000000 --- a/test/performance/charts/src/report.js +++ /dev/null @@ -1 +0,0 @@ -"use strict"; diff --git a/test/performance/env_testserver_test.go b/test/performance/env_testserver_test.go index 6c87c8766..819ab49f8 100644 --- a/test/performance/env_testserver_test.go +++ 
@@ -10,7 +10,6 @@ import (
 	ledgerclient "github.com/formancehq/ledger/pkg/client"
 	"io"
 	"os"
-	"sync"
 	"testing"
 
 	"github.com/formancehq/go-libs/v2/pointer"
@@ -44,17 +43,11 @@ var _ Env = (*TestServerEnv)(nil)
 
 type TestServerEnvFactory struct {
 	dockerPool *docker.Pool
-
-	once sync.Once
 }
 
 func (f *TestServerEnvFactory) Create(ctx context.Context, b *testing.B, ledger ledger.Ledger) Env {
 
-	f.once.Do(func() {
-		// Configure the environment to run benchmarks locally.
-		// Start a docker connection
-		f.dockerPool = docker.NewPool(b, logging.Testing())
-	})
+	f.dockerPool = docker.NewPool(b, logging.Testing())
 
 	pgServer := pgtesting.CreatePostgresServer(b, f.dockerPool, pgtesting.WithPGCrypto())
 
diff --git a/test/performance/justfile b/test/performance/justfile
new file mode 100755
index 000000000..f111fe189
--- /dev/null
+++ b/test/performance/justfile
@@ -0,0 +1,34 @@
+set dotenv-load
+set positional-arguments
+
+tmpdir := `mktemp -d`
+
+run bench='.' p='1' benchtime='1s' count='1' output='./report/benchmark-output.txt':
+    mkdir -p $(dirname {{output}})
+    rm -f {{output}}
+    go test -run ^$ -tags it,local \
+        -report.file ./report/report.json \
+        -timeout 600m \
+        -bench={{bench}} \
+        -count={{count}} \
+        -p {{p}} \
+        -test.benchtime {{benchtime}} . | tee -a {{output}}
+
+compare bench='.' p='1' benchtime='1s' count='1' output='./report/benchmark-output.txt':
+    trap 'rm -rf {{tmpdir}}' EXIT
+    just run {{bench}} {{p}} {{benchtime}} {{count}} './report/benchmark-output-local.txt'
+    git clone --depth 1 -b main https://github.com/formancehq/ledger {{tmpdir}}
+    go test -run ^$ -tags it,local -report.file \
+        ./report/report.json \
+        -timeout 600m \
+        -bench={{bench}} \
+        -count={{count}} \
+        -p {{p}} \
+        -test.benchtime {{benchtime}} \
+        {{tmpdir}}/test/performance | tee -a ./report/benchmark-output-main.txt
+    benchstat ./report/benchmark-output-main.txt ./report/benchmark-output-local.txt > ./report/benchmark-comparison.txt || true
+
+graphs:
+    cd charts && npm install
+    cd charts && npm run build
+    cd charts && node ./index.js
\ No newline at end of file
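
The workflows above drive the new justfile through `nix develop`, but the same recipes can also be invoked directly for a quick local check. A minimal sketch, assuming `just`, Go, `benchstat`, and Node.js are available locally; the argument values simply mirror the workflow defaults (bench `.`, parallelism `5`, duration `10s`, count `1`):

```shell
# Run the benchmarks once with the workflow's default arguments,
# then render the charts into test/performance/report.
just --justfile ./test/performance/justfile --working-directory ./test/performance run . 5 10s 1
just --justfile ./test/performance/justfile --working-directory ./test/performance graphs

# Benchmark the local branch against a fresh clone of main and write
# the benchstat summary to ./report/benchmark-comparison.txt.
just --justfile ./test/performance/justfile --working-directory ./test/performance compare . 5 10s 1
```

Both the rendered charts and the benchstat comparison land in `test/performance/report`, which is the directory the workflows upload as the `graphs` artifact.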