forked from pmem/pmemkv-bench
-
Notifications
You must be signed in to change notification settings - Fork 1
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Add support for running multiple test cases for one build
Instead of passing JSON, the user may pass a Python script that implements a generate() method returning a dictionary. Such a dictionary is validated against a JSON schema file. To do so, the build and benchmark configurations were split into two files. generate_obj_based_scope.py may be run as a standalone application, which prints the configuration JSON (also validated against the schema) to stdout. A JSON generated this way may be saved and passed to run_benchmark.py. Ref pmem#45
- Loading branch information
Pawel Karczewski
committed
Feb 19, 2021
1 parent
0da3c06
commit a18cc0d
Showing
7 changed files
with
282 additions
and
64 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
14 changes: 6 additions & 8 deletions
14
bench_scenarios/basic.json → bench_scenarios/basic_build.json
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
{
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "array",
    "items": [
        {
            "type": "object",
            "properties": {
                "env": {
                    "type": "object",
                    "description": "Definition of environment variables passed to pmemkv-bench run."
                },
                "params": {
                    "type": "object",
                    "description": "Parameters passed to pmemkv-bench binary",
                    "properties": {},
                    "required": [
                        "--benchmarks",
                        "--value_size",
                        "--threads",
                        "--engine",
                        "--num",
                        "--db",
                        "--db_size_in_gb"
                    ]
                }
            },
            "required": [
                "env",
                "params"
            ]
        }
    ]
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,81 @@ | ||
#!/usr/bin/env python3 | ||
# | ||
# SPDX-License-Identifier: Apache-2.0 | ||
# Copyright 2021, Intel Corporation | ||
|
||
# This script implements the generate() method, which may be invoked by run_benchmark.py
# directly, or used as a standalone application that prints a configuration JSON (also
# validated against the schema) to stdout. A JSON generated this way may be saved and
# passed to run_benchmark.py as a parameter.
|
||
import json | ||
import itertools | ||
import jsonschema | ||
import os | ||
|
||
# Workload mixes handed to pmemkv-bench via --benchmarks; a comma-separated
# entry runs the listed phases back to back in a single invocation.
benchmarks = [
    "fillseq",
    "fillrandom",
    "fillseq,readrandom,readrandom",
    "fillrandom,readrandom,readrandom",
    "fillseq,readseq,readseq",
    "fillrandom,readseq,readseq",
    "fillseq,readwhilewriting",
    "fillseq,readrandomwriterandom",
]
# Value sizes (in bytes) exercised for every workload (--value_size).
size = [8, 128]
# Number of elements inserted per run (--num).
number_of_elements = 100000000
|
||
|
||
def concurrent_engines():
    """Enumerate test cases for engines that support concurrent access.

    Returns a list of (benchmark, value_size, thread_count, engine) tuples:
    the full cross product of the module-level ``benchmarks`` and ``size``
    settings with the concurrent thread counts and engines below.
    """
    thread_counts = [1, 4, 8, 12, 18, 24]
    engines = ["cmap", "csmap"]
    return list(itertools.product(benchmarks, size, thread_counts, engines))
|
||
|
||
def single_threaded_engines():
    """Enumerate test cases for engines that only run single-threaded.

    Returns a list of (benchmark, value_size, thread_count, engine) tuples;
    the thread count is always 1 for these engines.
    """
    engines = ["radix", "stree"]
    return list(itertools.product(benchmarks, size, [1], engines))
|
||
|
||
def generate():
    """Build the full benchmark configuration list.

    Combines the concurrent and single-threaded test cases and converts each
    (benchmark, value_size, threads, engine) tuple into a configuration dict
    with ``env`` (environment variables for the run) and ``params``
    (command-line arguments for the pmemkv-bench binary) sections.

    The database path is taken from the PMEMKV_BENCH_DB_PATH environment
    variable, defaulting to /mnt/pmem0/pmemkv-bench.
    """
    db_path = os.getenv("PMEMKV_BENCH_DB_PATH", "/mnt/pmem0/pmemkv-bench")
    # Bind CPUs to the NUMA node backing the database file's directory.
    numa_binding = f"file:{os.path.dirname(db_path)}"

    test_cases = concurrent_engines() + single_threaded_engines()

    benchmarks_configuration = []
    for workload, value_size, threads, engine in test_cases:
        benchmarks_configuration.append(
            {
                "env": {
                    "NUMACTL_CPUBIND": numa_binding,
                },
                "params": {
                    "--benchmarks": f"{workload}",
                    "--value_size": f"{value_size}",
                    "--threads": f"{threads}",
                    "--engine": f"{engine}",
                    "--num": f"{number_of_elements}",
                    "--db": db_path,
                    "--db_size_in_gb": "200",
                },
            }
        )

    return benchmarks_configuration
|
||
|
||
if __name__ == "__main__":
    # Standalone mode: print the generated configuration as JSON to stdout,
    # after validating it against the schema file shipped next to this script.
    output = generate()
    # json.load reads and parses in one step; no need for a None placeholder
    # or a manual read()+loads round trip.
    with open("bench.schema.json", "r") as schema_file:
        schema = json.load(schema_file)
    try:
        jsonschema.validate(instance=output, schema=schema)
    except jsonschema.exceptions.ValidationError as e:
        # Report the validation failure and exit non-zero so callers
        # (e.g. CI) notice the broken configuration.
        print(e)
        # raise SystemExit instead of the interactive-only exit() builtin.
        raise SystemExit(1)
    print(json.dumps(output, indent=4))
Oops, something went wrong.