Add support for running multiple test cases for one build
Instead of passing JSON, the user may pass a Python script that implements a generate() method returning the benchmark configuration (a list of dictionaries). This configuration is validated against a JSON schema file. To make this possible, the build and benchmark configurations were split into two files.

generate_obj_based_scope.py may also be run as a standalone application, which prints the configuration JSON (likewise validated against the schema) to stdout. The generated JSON may then be saved and passed to run_benchmark.py.

ref pmem#45
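
For illustration, a minimal user-supplied scenario script might look as follows. This is a hypothetical sketch, not part of this commit; it assumes run_benchmark.py imports the module and calls its generate() method, and that the returned list must satisfy bench.schema.json:

#!/usr/bin/env python3
# Hypothetical minimal scenario script (illustration only, not part of this commit).


def generate():
    # Return a list of benchmark configurations; each entry is expected to
    # satisfy bench.schema.json: an "env" object plus a "params" object with
    # the required pmemkv-bench flags.
    return [
        {
            "env": {},
            "params": {
                "--benchmarks": "fillseq",
                "--value_size": "8",
                "--threads": "1",
                "--engine": "cmap",
                "--num": "1000",
                "--db": "/dev/shm/pmemkv",
                "--db_size_in_gb": "1",
            },
        }
    ]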
Pawel Karczewski committed Feb 19, 2021
1 parent 0da3c06 commit a18cc0d
Showing 7 changed files with 282 additions and 64 deletions.
1 change: 1 addition & 0 deletions Dockerfile
@@ -14,6 +14,7 @@ RUN apt update && \
     DEBIAN_FRONTEND=noninteractive apt install -y --no-install-recommends \
     clang-format-10 \
     python3-pymongo \
+    python3-jsonschema \
     python3-pytest \
     python3-pip \
     && rm -rf /var/lib/apt/lists/*
14 changes: 6 additions & 8 deletions bench_scenarios/basic.json → bench_scenarios/basic_build.json
@@ -1,21 +1,19 @@
 {
     "db_bench": {
-        "repo_url": "https://github.com/pmem/pmemkv-tools.git",
+        "repo_url": "https://github.com/pmem/pmemkv-bench.git",
         "commit": "HEAD",
-        "env": {
-            "PMEM_IS_PMEM_FORCE": "1"
-        },
-        "params": [
-            "--db=/dev/shm/pmemkv",
-            "--db_size_in_gb=1"
-        ]
+        "env": {}
     },
     "pmemkv": {
         "repo_url": "https://github.com/pmem/pmemkv.git",
         "commit": "HEAD",
         "cmake_params": [
             "-DCMAKE_BUILD_TYPE=Release",
             "-DENGINE_CMAP=1",
+            "-DENGINE_CSMAP=1",
+            "-DENGINE_RADIX=1",
+            "-DENGINE_STREE=1",
+            "-DENGINE_ROBINHOOD=1",
             "-DBUILD_JSON_CONFIG=1",
             "-DCXX_STANDARD=20",
             "-DBUILD_TESTS=OFF",
33 changes: 33 additions & 0 deletions bench_scenarios/bench.schema.json
@@ -0,0 +1,33 @@
{
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "array",
    "items": [
        {
            "type": "object",
            "properties": {
                "env": {
                    "type": "object",
                    "description": "Definition of environment variables passed to pmemkv-bench run."
                },
                "params": {
                    "type": "object",
                    "description": "Parameters passed to pmemkv-bench binary",
                    "properties": {},
                    "required": [
                        "--benchmarks",
                        "--value_size",
                        "--threads",
                        "--engine",
                        "--num",
                        "--db",
                        "--db_size_in_gb"
                    ]
                }
            },
            "required": [
                "env",
                "params"
            ]
        }
    ]
}
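
For reference, a minimal instance that conforms to this schema can be checked with the jsonschema package. The snippet below is a sketch with illustrative values:

import json

import jsonschema

# A minimal benchmark configuration (illustrative values only).
config = [
    {
        "env": {"PMEM_IS_PMEM_FORCE": "1"},
        "params": {
            "--benchmarks": "fillseq",
            "--value_size": "8",
            "--threads": "4",
            "--engine": "csmap",
            "--num": "1000",
            "--db": "/dev/shm/pmemkv",
            "--db_size_in_gb": "1",
        },
    }
]

with open("bench.schema.json") as schema_file:
    schema = json.load(schema_file)

# Raises jsonschema.exceptions.ValidationError if the instance does not conform.
jsonschema.validate(instance=config, schema=schema)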
@@ -16,19 +16,11 @@
"type": "object",
"description": "Definition of environment variables passed to pmemkv-bench run.",
"properties": {}
},
"params": {
"description": "Parameters passed to pmemkv-bench binary",
"type": "array",
"items": {
"type": "string"
}
}
},
"required": [
"commit",
"env",
"params",
"repo_url"
]
},
81 changes: 81 additions & 0 deletions bench_scenarios/generate_obj_based_scope.py
@@ -0,0 +1,81 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: Apache-2.0
# Copyright 2021, Intel Corporation

# This script implements the generate() method, which may be invoked by run_benchmark.py
# directly, or the script may be run as a standalone application that prints the
# configuration JSON (also validated against the schema) to stdout. The generated JSON
# may be saved and passed to run_benchmark.py as a parameter.

import json
import itertools
import jsonschema
import os

benchmarks = [
    "fillseq",
    "fillrandom",
    "fillseq,readrandom,readrandom",
    "fillrandom,readrandom,readrandom",
    "fillseq,readseq,readseq",
    "fillrandom,readseq,readseq",
    "fillseq,readwhilewriting",
    "fillseq,readrandomwriterandom",
]
size = [8, 128]
number_of_elements = 100000000


def concurrent_engines():
    number_of_threads = [1, 4, 8, 12, 18, 24]
    engine = ["cmap", "csmap"]
    # Cartesian product of benchmark mixes, value sizes, thread counts and engines.
    result = itertools.product(benchmarks, size, number_of_threads, engine)
    return list(result)


def single_threaded_engines():
    number_of_threads = [1]
    engine = ["radix", "stree"]
    result = itertools.product(benchmarks, size, number_of_threads, engine)
    return list(result)


def generate():
    benchmarks = concurrent_engines()
    benchmarks.extend(single_threaded_engines())
    benchmarks_configuration = []
    db_path = os.getenv("PMEMKV_BENCH_DB_PATH", "/mnt/pmem0/pmemkv-bench")
    for benchmark in benchmarks:
        benchmark_settings = {
            "env": {
                # Bind execution to the NUMA node of the database's filesystem.
                "NUMACTL_CPUBIND": f"file:{os.path.dirname(db_path)}",
            },
            "params": {
                "--benchmarks": f"{benchmark[0]}",
                "--value_size": f"{benchmark[1]}",
                "--threads": f"{benchmark[2]}",
                "--engine": f"{benchmark[3]}",
                "--num": f"{number_of_elements}",
                "--db": db_path,
                "--db_size_in_gb": "200",
            },
        }

        benchmarks_configuration.append(benchmark_settings)

    return benchmarks_configuration


if __name__ == "__main__":
    output = generate()
    schema = None
    with open("bench.schema.json", "r") as schema_file:
        schema = json.loads(schema_file.read())
    try:
        jsonschema.validate(instance=output, schema=schema)
    except jsonschema.exceptions.ValidationError as e:
        print(e)
        exit(1)
    print(json.dumps(output, indent=4))
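
run_benchmark.py is expected to either consume a pre-generated JSON file or import a scenario module like this one and call generate() itself. Below is a hypothetical sketch of such consumption; the actual loading logic lives in run_benchmark.py, which is not part of this diff:

import importlib.util


def load_scenario(path):
    # Import a scenario file by path and return its generated configuration.
    spec = importlib.util.spec_from_file_location("scenario", path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module.generate()


configs = load_scenario("bench_scenarios/generate_obj_based_scope.py")
print(f"{len(configs)} benchmark configurations generated")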