Skip to content

Commit

Permalink
Merge remote-tracking branch 'upstream/mvp_demo' into suppress-sqlexception-logs
Browse files Browse the repository at this point in the history
  • Loading branch information
khansaad committed Dec 7, 2023
2 parents 99e912b + b9695d4 commit 0d75805
Show file tree
Hide file tree
Showing 10 changed files with 256 additions and 20 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ spec:
spec:
containers:
- name: kruize
image: kruize/autotune_operator:0.0.20_rm
image: kruize/autotune_operator:0.0.20.1_rm
imagePullPolicy: Always
volumeMounts:
- name: config-volume
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ spec:
spec:
containers:
- name: kruize
image: kruize/autotune_operator:0.0.20_rm
image: kruize/autotune_operator:0.0.20.1_rm
imagePullPolicy: Always
volumeMounts:
- name: config-volume
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,7 @@ spec:
spec:
containers:
- name: kruize
image: kruize/autotune_operator:0.0.20_rm
image: kruize/autotune_operator:0.0.20.1_rm
imagePullPolicy: Always
volumeMounts:
- name: config-volume
Expand Down Expand Up @@ -208,7 +208,7 @@ spec:
spec:
containers:
- name: kruizecronjob
image: kruize/autotune_operator:0.0.20_rm
image: kruize/autotune_operator:0.0.20.1_rm
imagePullPolicy: Always
volumeMounts:
- name: config-volume
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -189,7 +189,7 @@ spec:
serviceAccountName: kruize-sa
containers:
- name: kruize
image: kruize/autotune_operator:0.0.20_rm
image: kruize/autotune_operator:0.0.20.1_rm
imagePullPolicy: Always
volumeMounts:
- name: config-volume
Expand Down Expand Up @@ -261,7 +261,7 @@ spec:
spec:
containers:
- name: kruizecronjob
image: kruize/autotune_operator:0.0.20_rm
image: kruize/autotune_operator:0.0.20.1_rm
imagePullPolicy: Always
volumeMounts:
- name: config-volume
Expand Down
2 changes: 1 addition & 1 deletion pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

<groupId>org.autotune</groupId>
<artifactId>autotune</artifactId>
<version>0.0.20_mvp</version>
<version>0.0.20.1_mvp</version>
<properties>
<fabric8-version>4.13.0</fabric8-version>
<org-json-version>20201115</org-json-version>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -218,6 +218,7 @@ public static String validateMetricsValues(String metricVariableName, MetricResu
if (value instanceof String) {
stringValue = (String) value;
}
// TODO: handle the conversions for additional supported formats
if (!KruizeSupportedTypes.SUPPORTED_FORMATS.contains(stringValue)) {
LOGGER.error(AnalyzerErrorConstants.AutotuneObjectErrors.UNSUPPORTED_FORMAT);
errorMsg = errorMsg.concat(AnalyzerErrorConstants.AutotuneObjectErrors.UNSUPPORTED_FORMAT);
Expand Down
2 changes: 1 addition & 1 deletion src/main/java/com/autotune/utils/KruizeSupportedTypes.java
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ private KruizeSupportedTypes() { }
new HashSet<>(Arrays.asList("deployment", "pod", "container"));

public static final Set<String> SUPPORTED_FORMATS =
new HashSet<>(Arrays.asList("cores", "MiB"));
new HashSet<>(Arrays.asList("cores", "m", "Bytes", "bytes", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "kB", "KB", "MB", "GB", "TB", "PB", "EB", "K", "k", "M", "G", "T", "P", "E"));

public static final Set<String> QUERY_PARAMS_SUPPORTED = new HashSet<>(Arrays.asList(
"experiment_name", "results", "recommendations", "latest"
Expand Down
31 changes: 27 additions & 4 deletions tests/scripts/remote_monitoring_tests/helpers/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -394,13 +394,22 @@ def validate_container(update_results_container, update_results_json, list_reco_
interval_end_time = update_results["interval_end_time"]
interval_start_time = update_results["interval_start_time"]
print(f"interval_end_time = {interval_end_time} interval_start_time = {interval_start_time}")

# Obtain the metrics
metrics = ""
containers = update_results['kubernetes_objects'][0]['containers']
for container in containers:
if update_results_container["container_image_name"] == container["container_image_name"]:
metrics = container["metrics"]

if check_if_recommendations_are_present(list_reco_container["recommendations"]):
terms_obj = list_reco_container["recommendations"]["data"][interval_end_time]["recommendation_terms"]
current_config = list_reco_container["recommendations"]["data"][interval_end_time]["current"]

duration_terms = ["short_term", "medium_term", "long_term"]
for term in duration_terms:
if check_if_recommendations_are_present(terms_obj[term]):
print(f"reco present for term {term}")
# Validate timestamps [deprecated as monitoring end time is moved to higher level]
# assert cost_obj[term]["monitoring_end_time"] == interval_end_time, \
# f"monitoring end time {cost_obj[term]['monitoring_end_time']} did not match end timestamp {interval_end_time}"
Expand Down Expand Up @@ -439,7 +448,7 @@ def validate_container(update_results_container, update_results_json, list_reco_
for engine_entry in engines_list:
if engine_entry in terms_obj[term]["recommendation_engines"]:
engine_obj = terms_obj[term]["recommendation_engines"][engine_entry]
validate_config(engine_obj["config"])
validate_config(engine_obj["config"], metrics)
validate_variation(current_config, engine_obj["config"], engine_obj["variation"])

else:
Expand All @@ -452,17 +461,27 @@ def validate_container(update_results_container, update_results_json, list_reco_
assert result == False, f"Recommendations notifications does not contain the expected message - {NOT_ENOUGH_DATA_MSG}"


def validate_config(reco_config, metrics):
    """
    Validate a recommendation config against the formats reported in the update-results metrics.

    Args:
        reco_config (dict): recommendation engine config with "requests" and "limits",
            each containing "cpu" and "memory" entries with "amount" and "format" keys.
        metrics (list[dict]): metrics from the update-results payload; the entries named
            "cpuUsage" and "memoryUsage" carry the expected format under
            results.aggregation_info.format.

    Raises:
        AssertionError: if any amount is not positive or any format does not match
            the format reported in the corresponding usage metric.
    """
    cpu_format_type = ""
    memory_format_type = ""

    # Pick up the expected cpu/memory formats from the posted metrics so the
    # recommendation output can be checked against what was actually reported.
    for metric in metrics:
        if "cpuUsage" == metric["name"]:
            cpu_format_type = metric['results']['aggregation_info']['format']

        if "memoryUsage" == metric["name"]:
            memory_format_type = metric['results']['aggregation_info']['format']

    usage_list = ["requests", "limits"]
    for usage in usage_list:
        assert reco_config[usage]["cpu"][
            "amount"] > 0, f"cpu amount in recommendation config is {reco_config[usage]['cpu']['amount']}"
        assert reco_config[usage]["cpu"][
            "format"] == cpu_format_type, f"cpu format in recommendation config is {reco_config[usage]['cpu']['format']} instead of {cpu_format_type}"
        assert reco_config[usage]["memory"][
            "amount"] > 0, f"memory amount in recommendation config is {reco_config[usage]['memory']['amount']}"
        assert reco_config[usage]["memory"][
            "format"] == memory_format_type, f"memory format in recommendation config is {reco_config[usage]['memory']['format']} instead of {memory_format_type}"


def check_if_recommendations_are_present(cost_obj):
Expand Down Expand Up @@ -635,20 +654,24 @@ def validate_variation(current_config: dict, recommended_config: dict, variation
current_cpu_value = current_requests[CPU_KEY][AMOUNT_KEY]
assert variation_requests[CPU_KEY][AMOUNT_KEY] == recommended_requests[CPU_KEY][
AMOUNT_KEY] - current_cpu_value
assert variation_requests[CPU_KEY][FORMAT_KEY] == recommended_requests[CPU_KEY][FORMAT_KEY]
if MEMORY_KEY in recommended_requests:
if MEMORY_KEY in current_requests and AMOUNT_KEY in current_requests[MEMORY_KEY]:
current_memory_value = current_requests[MEMORY_KEY][AMOUNT_KEY]
assert variation_requests[MEMORY_KEY][AMOUNT_KEY] == recommended_requests[MEMORY_KEY][
AMOUNT_KEY] - current_memory_value
assert variation_requests[MEMORY_KEY][FORMAT_KEY] == recommended_requests[MEMORY_KEY][FORMAT_KEY]
if recommended_limits is not None:
current_cpu_value = 0
current_memory_value = 0
if CPU_KEY in recommended_limits:
if CPU_KEY in current_limits and AMOUNT_KEY in current_limits[CPU_KEY]:
current_cpu_value = current_limits[CPU_KEY][AMOUNT_KEY]
assert variation_limits[CPU_KEY][AMOUNT_KEY] == recommended_limits[CPU_KEY][AMOUNT_KEY] - current_cpu_value
assert variation_limits[CPU_KEY][FORMAT_KEY] == recommended_limits[CPU_KEY][FORMAT_KEY]
if MEMORY_KEY in recommended_limits:
if MEMORY_KEY in current_limits and AMOUNT_KEY in current_limits[MEMORY_KEY]:
current_memory_value = current_limits[MEMORY_KEY][AMOUNT_KEY]
assert variation_limits[MEMORY_KEY][AMOUNT_KEY] == recommended_limits[MEMORY_KEY][
AMOUNT_KEY] - current_memory_value
assert variation_limits[MEMORY_KEY][FORMAT_KEY] == recommended_limits[MEMORY_KEY][FORMAT_KEY]
Original file line number Diff line number Diff line change
Expand Up @@ -255,14 +255,23 @@ def test_list_recommendations_single_exp_multiple_results(cluster_type):
result_json_file = "../json_files/multiple_results_single_exp.json"
response = update_results(result_json_file)

# Get the experiment name
json_data = json.load(open(input_json_file))
experiment_name = json_data[0]['experiment_name']
end_time = "2023-04-14T23:59:20.982Z"

data = response.json()
assert response.status_code == SUCCESS_STATUS_CODE
assert data['status'] == SUCCESS_STATUS
assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG

# Get the experiment name
json_data = json.load(open(input_json_file))
experiment_name = json_data[0]['experiment_name']
response = update_recommendations(experiment_name, None, end_time)
data = response.json()
assert response.status_code == SUCCESS_STATUS_CODE
assert data[0]['experiment_name'] == experiment_name
assert data[0]['kubernetes_objects'][0]['containers'][0]['recommendations']['notifications'][
NOTIFICATION_CODE_FOR_RECOMMENDATIONS_AVAILABLE]['message'] == RECOMMENDATIONS_AVAILABLE


response = list_recommendations(experiment_name)

Expand All @@ -276,18 +285,101 @@ def test_list_recommendations_single_exp_multiple_results(cluster_type):
# Validate the json values
create_exp_json = read_json_data_from_file(input_json_file)

# Uncomment the below lines when bulk entries are allowed
# update_results_json = read_json_data_from_file(result_json_file)
expected_duration_in_hours = SHORT_TERM_DURATION_IN_HRS_MAX

# Since bulk entries are not supported passing None for update results json
update_results_json = None
validate_reco_json(create_exp_json[0], update_results_json, list_reco_json[0])
update_results_json = []
result_json_arr = read_json_data_from_file(result_json_file)
update_results_json.append(result_json_arr[len(result_json_arr) - 1])
validate_reco_json(create_exp_json[0], update_results_json, list_reco_json[0], expected_duration_in_hours)

# Delete the experiment
response = delete_experiment(input_json_file)
print("delete exp = ", response.status_code)


@pytest.mark.sanity
@pytest.mark.parametrize("memory_format_type", ["bytes", "Bytes", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "kB", "KB", "MB", "GB", "TB", "PB", "EB", "K", "k", "M", "G", "T", "P", "E"])
@pytest.mark.parametrize("cpu_format_type", ["cores", "m"])
def test_list_recommendations_supported_metric_formats(memory_format_type, cpu_format_type, cluster_type):
    """
    Test Description: This test validates listRecommendations for every supported
    cpu and memory metric format by rewriting the result metrics with the
    parametrized formats before posting them.
    """
    input_json_file = "../json_files/create_exp.json"

    form_kruize_url(cluster_type)

    # Start from a clean slate in case a previous run left the experiment behind
    response = delete_experiment(input_json_file)
    print("delete exp = ", response.status_code)

    # Create the experiment and confirm success
    response = create_experiment(input_json_file)
    data = response.json()
    assert response.status_code == SUCCESS_STATUS_CODE
    assert data['status'] == SUCCESS_STATUS
    assert data['message'] == CREATE_EXP_SUCCESS_MSG

    # Rewrite the cpu/memory formats in every result entry with the
    # parametrized formats under test
    result_json_file = "../json_files/multiple_results_single_exp.json"
    results = read_json_data_from_file(result_json_file)
    for entry in results:
        for metric in entry['kubernetes_objects'][0]['containers'][0]['metrics']:
            if "cpu" in metric['name']:
                metric['results']['aggregation_info']['format'] = cpu_format_type
            if "memory" in metric['name']:
                metric['results']['aggregation_info']['format'] = memory_format_type

    tmp_update_results_json_file = f"/tmp/update_results_metric_{memory_format_type}_{cpu_format_type}.json"
    write_json_data_to_file(tmp_update_results_json_file, results)

    # Post the rewritten results and confirm success
    response = update_results(tmp_update_results_json_file)
    data = response.json()
    assert response.status_code == SUCCESS_STATUS_CODE
    assert data['status'] == SUCCESS_STATUS
    assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG

    # Trigger recommendation generation for the experiment
    experiment_name = json.load(open(input_json_file))[0]['experiment_name']
    end_time = "2023-04-14T23:59:20.982Z"

    response = update_recommendations(experiment_name, None, end_time)
    data = response.json()
    assert response.status_code == SUCCESS_STATUS_CODE
    assert data[0]['experiment_name'] == experiment_name
    assert data[0]['kubernetes_objects'][0]['containers'][0]['recommendations']['notifications'][
        NOTIFICATION_CODE_FOR_RECOMMENDATIONS_AVAILABLE]['message'] == RECOMMENDATIONS_AVAILABLE

    # Fetch the recommendations and validate them against the schema
    response = list_recommendations(experiment_name)
    list_reco_json = response.json()
    assert response.status_code == SUCCESS_200_STATUS_CODE

    errorMsg = validate_list_reco_json(list_reco_json, list_reco_json_schema)
    assert errorMsg == ""

    # Validate the recommendation values against the last posted result
    create_exp_json = read_json_data_from_file(input_json_file)
    expected_duration_in_hours = SHORT_TERM_DURATION_IN_HRS_MAX

    result_json_arr = read_json_data_from_file(tmp_update_results_json_file)
    update_results_json = [result_json_arr[len(result_json_arr) - 1]]

    validate_reco_json(create_exp_json[0], update_results_json, list_reco_json[0], expected_duration_in_hours)


@pytest.mark.extended
def test_list_recommendations_multiple_exps_from_diff_json_files_2(cluster_type):
"""
Expand Down
Loading

0 comments on commit 0d75805

Please sign in to comment.