# model_server_test.py (from sustainable-computing-io/kepler-model-server)
import requests
import os
import sys
import shutil
import json
import codecs
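
# Make the model server source tree (../src and its server/ and util/ subpackages)
# importable when this test is run directly.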
src_path = os.path.join(os.path.dirname(__file__), '../src')
server_path = os.path.join(os.path.dirname(__file__), '../src/server')
util_path = os.path.join(os.path.dirname(__file__), '../src/util')
sys.path.append(src_path)
sys.path.append(util_path)
sys.path.append(server_path)
from train_types import FeatureGroup, FeatureGroups, ModelOutputType
from model_server import MODEL_SERVER_PORT
from config import download_path
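
# The tests below issue live HTTP requests; they assume a model server instance
# is already listening on localhost:MODEL_SERVER_PORT.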
def get_model_request_json(metrics, output_type, node_type, weight, trainer_name, energy_source):
    return {"metrics": metrics, "output_type": output_type.name, "node_type": node_type, "weight": weight, "trainer_name": trainer_name, "source": energy_source}
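
# Example payload shape (illustrative values only; metrics depend on the feature group):
#   {"metrics": ["<metric>", ...], "output_type": "AbsPower", "node_type": -1,
#    "weight": false, "trainer_name": "", "source": "rapl-sysfs"}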
TMP_FILE = 'download.zip'
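
# Request a model from the server. With weight=True the response is a JSON weight
# dictionary; otherwise it is an archive that is unpacked under download_path/<output type>.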
def make_request(metrics, output_type, node_type=-1, weight=False, trainer_name="", energy_source='rapl-sysfs'):
    model_request = get_model_request_json(metrics, output_type, node_type, weight, trainer_name, energy_source)
    response = requests.post('http://localhost:{}/model'.format(MODEL_SERVER_PORT), json=model_request)
    assert response.status_code == 200, response.text
    if weight:
        weight_dict = json.loads(response.text)
        assert len(weight_dict) > 0, "weight dict must contain at least one component"
        for weight_values in weight_dict.values():
            weight_length = len(weight_values['All_Weights']['Numerical_Variables'])
            expected_length = len(metrics)
            assert weight_length <= expected_length, "returned weight metrics must be covered by the requested metrics: {} > {}".format(weight_length, expected_length)
    else:
        # the response is an archive; unpack it into a fresh per-output-type directory
        output_path = os.path.join(download_path, output_type.name)
        if os.path.exists(output_path):
            shutil.rmtree(output_path)
        with codecs.open(TMP_FILE, 'wb') as f:
            f.write(response.content)
        shutil.unpack_archive(TMP_FILE, output_path)
        os.remove(TMP_FILE)
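
# Fetch the server's current best models (parsed JSON from the /best-models endpoint).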
def get_models():
    response = requests.get('http://localhost:{}/best-models'.format(MODEL_SERVER_PORT))
    assert response.status_code == 200, response.text
    response = json.loads(response.text)
    return response
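
# End-to-end test flow: verify that best models are reported, then request models
# across feature groups, output types, node types, trainers, and energy sources.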
if __name__ == '__main__':
    models = get_models()
    assert len(models) > 0, "expected at least one output type"
    for output_models in models.values():
        assert len(output_models) > 0, "expected at least one best model per output type"

    test_feature_groups = [FeatureGroup.BPFOnly, FeatureGroup.CounterOnly]
    # for each feature group
    for fg in test_feature_groups:
        metrics = FeatureGroups[fg]
        # absolute power
        output_type = ModelOutputType.AbsPower
        make_request(metrics, output_type)
        make_request(metrics, output_type, weight=True)
        # dynamic power
        output_type = ModelOutputType.DynPower
        make_request(metrics, output_type)
        make_request(metrics, output_type, weight=True)

    metrics = FeatureGroups[FeatureGroup.BPFOnly]
    # with an explicit node_type
    make_request(metrics, output_type, node_type=1)
    make_request(metrics, output_type, node_type=1, weight=True)
    # with an explicit trainer name
    trainer_name = "SGDRegressorTrainer"
    make_request(metrics, output_type, trainer_name=trainer_name)
    make_request(metrics, output_type, trainer_name=trainer_name, node_type=1)
    make_request(metrics, output_type, trainer_name=trainer_name, node_type=1, weight=True)
    # with the acpi energy source
    make_request(metrics, output_type, energy_source="acpi", trainer_name=trainer_name, node_type=1, weight=True)
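
# To run (a sketch, assuming a model server has already been started separately
# and the relative ../src layout above holds):
#   python model_server_test.py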