From de51b2d0ec7b42d65c0b2314dc4c49d6b6ababe2 Mon Sep 17 00:00:00 2001 From: Shreya Date: Fri, 26 Apr 2024 18:12:26 +0530 Subject: [PATCH 01/10] Implement local monitoring testsuite and refactor common helpers --- tests/scripts/common/common_functions.sh | 3 +- tests/scripts/functional_tests.sh | 1 + .../helpers/__init__.py | 0 .../all_terms_list_reco_json_schema.py | 0 .../helpers/fixtures.py | 0 .../helpers/generate_datasource_json.py | 4 +- .../helpers/generate_rm_jsons.py | 2 +- .../helpers/kruize.py | 0 .../helpers/list_reco_json_schema.py | 0 .../helpers/list_reco_json_validate.py | 0 .../long_term_list_reco_json_schema.py | 0 ...ium_and_long_term_list_reco_json_schema.py | 0 .../medium_term_list_reco_json_schema.py | 0 ...ort_and_long_term_list_reco_json_schema.py | 0 ...t_and_medium_term_list_reco_json_schema.py | 0 .../short_term_list_reco_json_schema.py | 0 .../helpers/utils.py | 0 .../local_monitoring_tests/conftest.py | 5 + .../resource_optimization_openshift.json | 194 ++++++++++++++++++ .../local_monitoring_tests.sh | 156 ++++++++++++++ .../scripts/local_monitoring_tests/pytest.ini | 7 + .../local_monitoring_tests/requirements.txt | 4 + .../kruize_pod_restart_test.py | 2 +- .../rest_apis/test_create_experiment.py | 3 + .../rest_apis/test_e2e_workflow.py | 3 + .../rest_apis/test_list_recommendations.py | 2 + .../rest_apis/test_update_recommendations.py | 2 + .../rest_apis/test_update_results.py | 2 + tests/test_autotune.sh | 5 +- 29 files changed, 388 insertions(+), 7 deletions(-) rename tests/scripts/{remote_monitoring_tests => }/helpers/__init__.py (100%) rename tests/scripts/{remote_monitoring_tests => }/helpers/all_terms_list_reco_json_schema.py (100%) rename tests/scripts/{remote_monitoring_tests => }/helpers/fixtures.py (100%) rename tests/scripts/{local_monitoring_tests => }/helpers/generate_datasource_json.py (85%) rename tests/scripts/{remote_monitoring_tests => }/helpers/generate_rm_jsons.py (99%) rename tests/scripts/{remote_monitoring_tests => }/helpers/kruize.py (100%) rename tests/scripts/{remote_monitoring_tests => }/helpers/list_reco_json_schema.py (100%) rename tests/scripts/{remote_monitoring_tests => }/helpers/list_reco_json_validate.py (100%) rename tests/scripts/{remote_monitoring_tests => }/helpers/long_term_list_reco_json_schema.py (100%) rename tests/scripts/{remote_monitoring_tests => }/helpers/medium_and_long_term_list_reco_json_schema.py (100%) rename tests/scripts/{remote_monitoring_tests => }/helpers/medium_term_list_reco_json_schema.py (100%) rename tests/scripts/{remote_monitoring_tests => }/helpers/short_and_long_term_list_reco_json_schema.py (100%) rename tests/scripts/{remote_monitoring_tests => }/helpers/short_and_medium_term_list_reco_json_schema.py (100%) rename tests/scripts/{remote_monitoring_tests => }/helpers/short_term_list_reco_json_schema.py (100%) rename tests/scripts/{remote_monitoring_tests => }/helpers/utils.py (100%) create mode 100644 tests/scripts/local_monitoring_tests/conftest.py create mode 100644 tests/scripts/local_monitoring_tests/json_files/resource_optimization_openshift.json create mode 100644 tests/scripts/local_monitoring_tests/local_monitoring_tests.sh create mode 100644 tests/scripts/local_monitoring_tests/pytest.ini create mode 100644 tests/scripts/local_monitoring_tests/requirements.txt diff --git a/tests/scripts/common/common_functions.sh b/tests/scripts/common/common_functions.sh index 37f736475..ddd040b88 100755 --- a/tests/scripts/common/common_functions.sh +++ b/tests/scripts/common/common_functions.sh @@ 
-45,7 +45,8 @@ TEST_SUITE_ARRAY=("app_autotune_yaml_tests" "autotune_id_tests" "kruize_layer_id_tests" "em_standalone_tests" -"remote_monitoring_tests") +"remote_monitoring_tests" +"local_monitoring_tests") modify_kruize_layer_tests=("add_new_tunable" "apply_null_tunable" diff --git a/tests/scripts/functional_tests.sh b/tests/scripts/functional_tests.sh index b2b086801..13a814764 100755 --- a/tests/scripts/functional_tests.sh +++ b/tests/scripts/functional_tests.sh @@ -32,6 +32,7 @@ SCRIPTS_DIR="${CURRENT_DIR}" . ${SCRIPTS_DIR}/da/kruize_layer_id_tests.sh . ${SCRIPTS_DIR}/em/em_standalone_tests.sh . ${SCRIPTS_DIR}/remote_monitoring_tests/remote_monitoring_tests.sh +. ${SCRIPTS_DIR}/local_monitoring_tests/local_monitoring_tests.sh # Iterate through the commandline options while getopts i:o:r:-: gopts diff --git a/tests/scripts/remote_monitoring_tests/helpers/__init__.py b/tests/scripts/helpers/__init__.py similarity index 100% rename from tests/scripts/remote_monitoring_tests/helpers/__init__.py rename to tests/scripts/helpers/__init__.py diff --git a/tests/scripts/remote_monitoring_tests/helpers/all_terms_list_reco_json_schema.py b/tests/scripts/helpers/all_terms_list_reco_json_schema.py similarity index 100% rename from tests/scripts/remote_monitoring_tests/helpers/all_terms_list_reco_json_schema.py rename to tests/scripts/helpers/all_terms_list_reco_json_schema.py diff --git a/tests/scripts/remote_monitoring_tests/helpers/fixtures.py b/tests/scripts/helpers/fixtures.py similarity index 100% rename from tests/scripts/remote_monitoring_tests/helpers/fixtures.py rename to tests/scripts/helpers/fixtures.py diff --git a/tests/scripts/local_monitoring_tests/helpers/generate_datasource_json.py b/tests/scripts/helpers/generate_datasource_json.py similarity index 85% rename from tests/scripts/local_monitoring_tests/helpers/generate_datasource_json.py rename to tests/scripts/helpers/generate_datasource_json.py index ec1c773a7..67537e601 100644 --- a/tests/scripts/local_monitoring_tests/helpers/generate_datasource_json.py +++ b/tests/scripts/helpers/generate_datasource_json.py @@ -18,7 +18,7 @@ def generate_datasource_json(csv_file, json_file): with open(json_file, 'w') as jsonfile: json.dump(datasources, jsonfile, indent=4) -csv_file_path = '../csv_data/datasources.csv' -json_file_path = '../json_files/datasources.json' +csv_file_path = '../local_monitoring_tests/csv_data/datasources.csv' +json_file_path = '../local_monitoring_tests/json_files/datasources.json' generate_datasource_json(csv_file_path, json_file_path) diff --git a/tests/scripts/remote_monitoring_tests/helpers/generate_rm_jsons.py b/tests/scripts/helpers/generate_rm_jsons.py similarity index 99% rename from tests/scripts/remote_monitoring_tests/helpers/generate_rm_jsons.py rename to tests/scripts/helpers/generate_rm_jsons.py index 833773905..1cc481a99 100644 --- a/tests/scripts/remote_monitoring_tests/helpers/generate_rm_jsons.py +++ b/tests/scripts/helpers/generate_rm_jsons.py @@ -31,7 +31,7 @@ def convert_date_format(input_date_str): output_date_str = input_date.strftime("%Y-%m-%dT%H:%M:%S.000Z") return output_date_str -def create_exp_jsons(split = False, split_count = 1, exp_json_dir = "/tmp/exp_jsons", total_exps = 10): +def create_exp_jsons(split = False, split_count = 1, exp_json_dir = "/tmp/exp_jsons", total_exps = 10, target_cluster="remote"): complete_json_data = [] single_json_data = [] multi_json_data = [] diff --git a/tests/scripts/remote_monitoring_tests/helpers/kruize.py b/tests/scripts/helpers/kruize.py similarity 
index 100% rename from tests/scripts/remote_monitoring_tests/helpers/kruize.py rename to tests/scripts/helpers/kruize.py diff --git a/tests/scripts/remote_monitoring_tests/helpers/list_reco_json_schema.py b/tests/scripts/helpers/list_reco_json_schema.py similarity index 100% rename from tests/scripts/remote_monitoring_tests/helpers/list_reco_json_schema.py rename to tests/scripts/helpers/list_reco_json_schema.py diff --git a/tests/scripts/remote_monitoring_tests/helpers/list_reco_json_validate.py b/tests/scripts/helpers/list_reco_json_validate.py similarity index 100% rename from tests/scripts/remote_monitoring_tests/helpers/list_reco_json_validate.py rename to tests/scripts/helpers/list_reco_json_validate.py diff --git a/tests/scripts/remote_monitoring_tests/helpers/long_term_list_reco_json_schema.py b/tests/scripts/helpers/long_term_list_reco_json_schema.py similarity index 100% rename from tests/scripts/remote_monitoring_tests/helpers/long_term_list_reco_json_schema.py rename to tests/scripts/helpers/long_term_list_reco_json_schema.py diff --git a/tests/scripts/remote_monitoring_tests/helpers/medium_and_long_term_list_reco_json_schema.py b/tests/scripts/helpers/medium_and_long_term_list_reco_json_schema.py similarity index 100% rename from tests/scripts/remote_monitoring_tests/helpers/medium_and_long_term_list_reco_json_schema.py rename to tests/scripts/helpers/medium_and_long_term_list_reco_json_schema.py diff --git a/tests/scripts/remote_monitoring_tests/helpers/medium_term_list_reco_json_schema.py b/tests/scripts/helpers/medium_term_list_reco_json_schema.py similarity index 100% rename from tests/scripts/remote_monitoring_tests/helpers/medium_term_list_reco_json_schema.py rename to tests/scripts/helpers/medium_term_list_reco_json_schema.py diff --git a/tests/scripts/remote_monitoring_tests/helpers/short_and_long_term_list_reco_json_schema.py b/tests/scripts/helpers/short_and_long_term_list_reco_json_schema.py similarity index 100% rename from tests/scripts/remote_monitoring_tests/helpers/short_and_long_term_list_reco_json_schema.py rename to tests/scripts/helpers/short_and_long_term_list_reco_json_schema.py diff --git a/tests/scripts/remote_monitoring_tests/helpers/short_and_medium_term_list_reco_json_schema.py b/tests/scripts/helpers/short_and_medium_term_list_reco_json_schema.py similarity index 100% rename from tests/scripts/remote_monitoring_tests/helpers/short_and_medium_term_list_reco_json_schema.py rename to tests/scripts/helpers/short_and_medium_term_list_reco_json_schema.py diff --git a/tests/scripts/remote_monitoring_tests/helpers/short_term_list_reco_json_schema.py b/tests/scripts/helpers/short_term_list_reco_json_schema.py similarity index 100% rename from tests/scripts/remote_monitoring_tests/helpers/short_term_list_reco_json_schema.py rename to tests/scripts/helpers/short_term_list_reco_json_schema.py diff --git a/tests/scripts/remote_monitoring_tests/helpers/utils.py b/tests/scripts/helpers/utils.py similarity index 100% rename from tests/scripts/remote_monitoring_tests/helpers/utils.py rename to tests/scripts/helpers/utils.py diff --git a/tests/scripts/local_monitoring_tests/conftest.py b/tests/scripts/local_monitoring_tests/conftest.py new file mode 100644 index 000000000..b03f52085 --- /dev/null +++ b/tests/scripts/local_monitoring_tests/conftest.py @@ -0,0 +1,5 @@ +def pytest_addoption(parser): + parser.addoption( + '--cluster_type', action='store', default='minikube', help='Cluster type' + ) + diff --git 
a/tests/scripts/local_monitoring_tests/json_files/resource_optimization_openshift.json b/tests/scripts/local_monitoring_tests/json_files/resource_optimization_openshift.json new file mode 100644 index 000000000..6949385be --- /dev/null +++ b/tests/scripts/local_monitoring_tests/json_files/resource_optimization_openshift.json @@ -0,0 +1,194 @@ +{ + "name": "resource-optimization-openshift", + "profile_version": 1, + "k8s_type": "openshift", + "slo": { + "slo_class": "resource_usage", + "direction": "minimize", + "objective_function": { + "function_type": "source" + }, + "function_variables": [ + { + "name": "cpuRequest", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "container", + "aggregation_functions": [ + { + "function": "avg", + "query": "avg(kube_pod_container_resource_requests{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\", resource=\"cpu\", unit=\"core\"})" + }, + { + "function": "sum", + "query": "sum(kube_pod_container_resource_requests{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\", resource=\"cpu\", unit=\"core\"})" + } + ] + }, + { + "name": "cpuLimit", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "container", + "aggregation_functions": [ + { + "function": "avg", + "query": "avg(kube_pod_container_resource_limits{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\", resource=\"cpu\", unit=\"core\"})" + }, + { + "function": "sum", + "query": "sum(kube_pod_container_resource_limits{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\", resource=\"cpu\", unit=\"core\"})" + } + ] + }, + { + "name": "cpuUsage", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "container", + "aggregation_functions": [ + { + "function": "avg", + "query": "avg(avg_over_time(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", namespace=\"$NAMESPACE$\", container=\"$CONTAINER_NAME$\"}[15m]))", + "versions": "<=4.8" + }, + { + "function": "avg", + "query": "avg(avg_over_time(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", namespace=\"$NAMESPACE$\", container=\"$CONTAINER_NAME$\"}[15m]))", + "versions": ">4.9" + }, + { + "function": "min", + "query": "min(min_over_time(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", namespace=\"$NAMESPACE$\", container=\"$CONTAINER_NAME$\"}[15m]))", + "versions": "<=4.8" + }, + { + "function": "min", + "query": "min(min_over_time(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", namespace=\"$NAMESPACE$\", container=\"$CONTAINER_NAME$\"}[15m]))", + "versions": ">4.9" + }, + { + "function": "max", + "query": "max(max_over_time(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", namespace=\"$NAMESPACE$\", container=\"$CONTAINER_NAME$\"}[15m]))", + "versions": "<=4.8" + }, + { + "function": "max", + "query": "max(max_over_time(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", namespace=\"$NAMESPACE$\", container=\"$CONTAINER_NAME$\"}[15m]))", + "versions": ">4.9" + }, + { + "function": "sum", + "query":
"sum(avg_over_time(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", namespace=\"$NAMESPACE$\", container=\"$CONTAINER_NAME$\"}[15m]))", + "versions": "<=4.8" + }, + { + "function": "sum", + "query": "sum(avg_over_time(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", namespace=\"$NAMESPACE$\", container=\"$CONTAINER_NAME$\"}[15m]))", + "versions": ">4.9" + } + ] + }, + { + "name": "cpuThrottle", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "container", + "aggregation_functions": [ + { + "function": "avg", + "query": "avg(rate(container_cpu_cfs_throttled_seconds_total{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", namespace=\"$NAMESPACE$\", container=”$CONTAINER_NAME$”}[15m]))" + }, + { + "function": "max", + "query": "max(rate(container_cpu_cfs_throttled_seconds_total{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", namespace=\"$NAMESPACE$\", container=”$CONTAINER_NAME$”}[15m]))" + }, + { + "function": "sum", + "query": "sum(rate(container_cpu_cfs_throttled_seconds_total{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", namespace=\"$NAMESPACE$\", container=”$CONTAINER_NAME$”}[15m]))" + } + ] + }, + { + "name": "memoryRequest", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "container", + "aggregation_functions": [ + { + "function": "avg", + "query": "avg(kube_pod_container_resource_requests{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", container=”$CONTAINER_NAME$”, namespace=”$NAMESPACE”, resource=\"memory\", unit=\"byte\"})" + }, + { + "function": "sum", + "query": "sum(kube_pod_container_resource_requests{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", container=”$CONTAINER_NAME$”, namespace=”$NAMESPACE”, resource=\"memory\", unit=\"byte\"})" + } + ] + }, + { + "name": "memoryLimit", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "container", + "aggregation_functions": [ + { + "function": "avg", + "query": "avg(kube_pod_container_resource_limits{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE\", resource=\"memory\", unit=\"byte\"})" + }, + { + "function": "sum", + "query": "sum(kube_pod_container_resource_limits{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", container=”$CONTAINER_NAME$”, namespace=”$NAMESPACE”, resource=\"memory\", unit=\"byte\"})" + } + ] + }, + { + "name": "memoryUsage", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "container", + "aggregation_functions": [ + { + "function": "avg", + "query": "avg(avg_over_time(container_memory_working_set_bytes{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", namespace=$NAMESPACE$, container=”$CONTAINER_NAME$”}[15m]))" + }, + { + "function": "min", + "query": "min(min_over_time(container_memory_working_set_bytes{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", namespace=$NAMESPACE$, container=\"$CONTAINER_NAME$\"}[15m]))" + }, + { + "function": "max", + "query": "max(max_over_time(container_memory_working_set_bytes{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", namespace=$NAMESPACE$, container=\"$CONTAINER_NAME$\"}[15m]))" + }, + { + "function": "sum", + "query": "sum(avg_over_time(container_memory_working_set_bytes{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", namespace=$NAMESPACE$, container=\"$CONTAINER_NAME$\"}[15m]))" + } + ] + }, + { + "name": "memoryRSS", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "container", + "aggregation_functions": [ + { + "function": 
"avg", + "query": "avg(avg_over_time(container_memory_rss{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", namespace=$NAMESPACE$, container=”$CONTAINER_NAME$”}[15m]))" + }, + { + "function": "min", + "query": "min(min_over_time(container_memory_rss{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", namespace=$NAMESPACE$, container=\"$CONTAINER_NAME$\"}[15m]))" + }, + { + "function": "max", + "query": "max(max_over_time(container_memory_rss{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", namespace=$NAMESPACE$, container=\"$CONTAINER_NAME$\"}[15m]))" + }, + { + "function": "sum", + "query": "sum(avg_over_time(container_memory_rss{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", namespace=$NAMESPACE$, container=”$CONTAINER_NAME$”}[15m]))" + } + ] + } + ] + } +} diff --git a/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh b/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh new file mode 100644 index 000000000..39fb56f7a --- /dev/null +++ b/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh @@ -0,0 +1,156 @@ +#!/bin/bash +# +# Copyright (c) 2023, 2023 Red Hat, IBM Corporation and others. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +##### Script to perform basic tests for EM ##### + + +# Get the absolute path of current directory +CURRENT_DIR="$(dirname "$(realpath "$0")")" +LOCAL_MONITORING_TEST_DIR="${CURRENT_DIR}/local_monitoring_tests" + +# Source the common functions scripts +. ${LOCAL_MONITORING_TEST_DIR}/../common/common_functions.sh + +# Tests to validate Local monitoring mode in Kruize +function local_monitoring_tests() { + start_time=$(get_date) + FAILED_CASES=() + TESTS_FAILED=0 + TESTS_PASSED=0 + TESTS=0 + failed=0 + marker_options="" + ((TOTAL_TEST_SUITES++)) + + python3 --version >/dev/null 2>/dev/null + err_exit "ERROR: python3 not installed" + + target="crc" + perf_profile_json="${LOCAL_MONITORING_TEST_DIR}/json_files/resource_optimization_openshift.json" + + local_monitoring_tests=("test_e2e" "sanity" "extended" "negative") + + # check if the test case is supported + if [ ! -z "${testcase}" ]; then + check_test_case "local_monitoring" + fi + + # create the result directory for given testsuite + echo "" + TEST_SUITE_DIR="${RESULTS}/local_monitoring_tests" + KRUIZE_SETUP_LOG="${TEST_SUITE_DIR}/kruize_setup.log" + KRUIZE_POD_LOG="${TEST_SUITE_DIR}/kruize_pod.log" + + mkdir -p ${TEST_SUITE_DIR} + + # Setup kruize + if [ ${skip_setup} -eq 0 ]; then + echo "Setting up kruize..." | tee -a ${LOG} + echo "${KRUIZE_SETUP_LOG}" + setup "${KRUIZE_POD_LOG}" >> ${KRUIZE_SETUP_LOG} 2>&1 + echo "Setting up kruize...Done" | tee -a ${LOG} + + sleep 60 + + # create performance profile + create_performance_profile ${perf_profile_json} + else + echo "Skipping kruize setup..." 
| tee -a ${LOG} + fi + + # If testcase is not specified run all tests + if [ -z "${testcase}" ]; then + testtorun=("${local_monitoring_tests[@]}") + else + testtorun=${testcase} + fi + + # create the result directory for given testsuite + echo "" + mkdir -p ${TEST_SUITE_DIR} + + PIP_INSTALL_LOG="${TEST_SUITE_DIR}/pip_install.log" + + echo "" + echo "Installing the required python modules..." + echo "python3 -m pip install -r ${LOCAL_MONITORING_TEST_DIR}/requirements.txt > ${PIP_INSTALL_LOG}" + # The --user flag is omitted as it fails with the error: "Can not perform a '--user' install. User site-packages are not visible in this virtualenv." + python3 -m pip install -r "${LOCAL_MONITORING_TEST_DIR}/requirements.txt" > ${PIP_INSTALL_LOG} 2>&1 + err_exit "ERROR: Installing python modules for the test run failed!" + + echo "" + echo "******************* Executing test suite ${FUNCNAME} ****************" + echo "" + + for test in "${testtorun[@]}" + do + TEST_DIR="${TEST_SUITE_DIR}/${test}" + mkdir ${TEST_DIR} + LOG="${TEST_DIR}/${test}.log" + + echo "" + echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" | tee -a ${LOG} + echo " Running Test ${test}" | tee -a ${LOG} + echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"| tee -a ${LOG} + + echo " " | tee -a ${LOG} + echo "Test description: ${local_monitoring_test_description[$test]}" | tee -a ${LOG} + echo " " | tee -a ${LOG} + + pushd ${LOCAL_MONITORING_TEST_DIR}/rest_apis > /dev/null + echo "pytest -m ${test} --junitxml=${TEST_DIR}/report-${test}.xml --html=${TEST_DIR}/report-${test}.html --cluster_type ${cluster_type}" + pytest -m ${test} --junitxml=${TEST_DIR}/report-${test}.xml --html=${TEST_DIR}/report-${test}.html --cluster_type ${cluster_type} | tee -a ${LOG} + err_exit "ERROR: Running the test using pytest failed, check ${LOG} for details!"
+ + popd > /dev/null + + passed=$(grep -o -E '[0-9]+ passed' ${TEST_DIR}/report-${test}.html | cut -d' ' -f1) + failed=$(grep -o -E 'check the boxes to filter the results.*' ${TEST_DIR}/report-${test}.html | grep -o -E '[0-9]+ failed' | cut -d' ' -f1) + errors=$(grep -o -E '[0-9]+ errors' ${TEST_DIR}/report-${test}.html | cut -d' ' -f1) + + TESTS_PASSED=$(($TESTS_PASSED + $passed)) + TESTS_FAILED=$(($TESTS_FAILED + $failed)) + + if [ "${errors}" -ne "0" ]; then + echo "Tests did not execute as there were errors, check the logs" + exit 1 + fi + + if [ "${TESTS_FAILED}" -ne "0" ]; then + FAILED_CASES+=(${test}) + fi + + done + + TESTS=$(($TESTS_PASSED + $TESTS_FAILED)) + TOTAL_TESTS_FAILED=${TESTS_FAILED} + TOTAL_TESTS_PASSED=${TESTS_PASSED} + TOTAL_TESTS=${TESTS} + + if [ "${TESTS_FAILED}" -ne "0" ]; then + FAILED_TEST_SUITE+=(${FUNCNAME}) + fi + + end_time=$(get_date) + elapsed_time=$(time_diff "${start_time}" "${end_time}") + + # Remove the duplicates + FAILED_CASES=( $(printf '%s\n' "${FAILED_CASES[@]}" | uniq ) ) + + # print the testsuite summary + testsuitesummary ${FUNCNAME} ${elapsed_time} ${FAILED_CASES} +} diff --git a/tests/scripts/local_monitoring_tests/pytest.ini b/tests/scripts/local_monitoring_tests/pytest.ini new file mode 100644 index 000000000..48bdd36e6 --- /dev/null +++ b/tests/scripts/local_monitoring_tests/pytest.ini @@ -0,0 +1,7 @@ +# content of pytest.ini +[pytest] +markers = + sanity: mark a test as a sanity test + test_e2e: mark a test as an end-to-end test + negative: mark a test as a negative test + extended: mark a test as an extended test diff --git a/tests/scripts/local_monitoring_tests/requirements.txt b/tests/scripts/local_monitoring_tests/requirements.txt new file mode 100644 index 000000000..b14263e72 --- /dev/null +++ b/tests/scripts/local_monitoring_tests/requirements.txt @@ -0,0 +1,4 @@ +pytest +requests +jinja2 +pytest-html==3.2.0 \ No newline at end of file diff --git a/tests/scripts/remote_monitoring_tests/fault_tolerant_tests/kruize_pod_restart_test.py b/tests/scripts/remote_monitoring_tests/fault_tolerant_tests/kruize_pod_restart_test.py index 41bf659fd..50118a982 100644 --- a/tests/scripts/remote_monitoring_tests/fault_tolerant_tests/kruize_pod_restart_test.py +++ b/tests/scripts/remote_monitoring_tests/fault_tolerant_tests/kruize_pod_restart_test.py @@ -18,7 +18,7 @@ import json import os import time -sys.path.append("..") +sys.path.append("../../") from helpers.kruize import * from helpers.utils import * from helpers.generate_rm_jsons import * diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_create_experiment.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_create_experiment.py index b72032891..b4c06e5c8 100644 --- a/tests/scripts/remote_monitoring_tests/rest_apis/test_create_experiment.py +++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_create_experiment.py @@ -1,4 +1,7 @@ import pytest +import sys +sys.path.append("../../") + from helpers.fixtures import * from helpers.kruize import * from helpers.utils import * diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_e2e_workflow.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_e2e_workflow.py index c501f9b27..92bee44a1 100644 --- a/tests/scripts/remote_monitoring_tests/rest_apis/test_e2e_workflow.py +++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_e2e_workflow.py @@ -2,6 +2,9 @@ import json import pytest +import sys +sys.path.append("../../") + from helpers.fixtures import * from helpers.generate_rm_jsons import * from helpers.kruize import
* diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_list_recommendations.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_list_recommendations.py index 1ce577fce..d72e96adb 100644 --- a/tests/scripts/remote_monitoring_tests/rest_apis/test_list_recommendations.py +++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_list_recommendations.py @@ -2,6 +2,8 @@ import json import pytest +import sys +sys.path.append("../../") from helpers.all_terms_list_reco_json_schema import all_terms_list_reco_json_schema from helpers.fixtures import * diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py index 9273c271d..c7c58b2f3 100644 --- a/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py +++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py @@ -1,4 +1,6 @@ import pytest +import sys +sys.path.append("../../") from helpers.fixtures import * from helpers.kruize import * from helpers.list_reco_json_validate import * diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_update_results.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_results.py index 65ef4d5f7..0d105f167 100644 --- a/tests/scripts/remote_monitoring_tests/rest_apis/test_update_results.py +++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_results.py @@ -1,4 +1,6 @@ import pytest +import sys +sys.path.append("../../") from helpers.fixtures import * from helpers.kruize import * from helpers.utils import * diff --git a/tests/test_autotune.sh b/tests/test_autotune.sh index 5bf18e2e4..c99bece9f 100755 --- a/tests/test_autotune.sh +++ b/tests/test_autotune.sh @@ -214,7 +214,7 @@ if [ ! -z "${testcase}" ]; then fi # check for benchmarks directory path -if [ ! 
"${testsuite}" == "remote_monitoring_tests" ]; then +if [[ "${testsuite}" != "remote_monitoring_tests" && "${testsuite}" != "local_monitoring_tests" ]]; then if [ -z "${APP_REPO}" ]; then echo "Error: Do specify the benchmarks directory path" usage @@ -256,7 +256,8 @@ if [ "${setup}" -ne "0" ]; then exit 0 fi else - if [ ${testsuite} == "remote_monitoring_tests" ]; then + #TODO: the target for local monitoring is temporarily set to "crc" for the demo + if [ ${testsuite} == "remote_monitoring_tests" ] || [ ${testsuite} == "local_monitoring_tests" ] ; then target="crc" else target="autotune" From 86e5bbb4c0b33806a8293881df3d0988f9f97b28 Mon Sep 17 00:00:00 2001 From: Shreya Date: Fri, 26 Apr 2024 18:22:34 +0530 Subject: [PATCH 02/10] Add import metadata tests --- .../helpers/import_metadata_json_schema.py | 36 ++++++ .../helpers/import_metadata_json_validate.py | 68 ++++++++++ tests/scripts/helpers/kruize.py | 89 +++++++++++++ tests/scripts/helpers/utils.py | 6 + .../json_files/import_metadata.json | 5 + .../json_files/import_metadata_template.json | 4 + .../rest_apis/test_import_metadata.py | 121 ++++++++++++++++++ 7 files changed, 329 insertions(+) create mode 100644 tests/scripts/helpers/import_metadata_json_schema.py create mode 100644 tests/scripts/helpers/import_metadata_json_validate.py create mode 100644 tests/scripts/local_monitoring_tests/json_files/import_metadata.json create mode 100644 tests/scripts/local_monitoring_tests/json_files/import_metadata_template.json create mode 100644 tests/scripts/local_monitoring_tests/rest_apis/test_import_metadata.py diff --git a/tests/scripts/helpers/import_metadata_json_schema.py b/tests/scripts/helpers/import_metadata_json_schema.py new file mode 100644 index 000000000..a81961494 --- /dev/null +++ b/tests/scripts/helpers/import_metadata_json_schema.py @@ -0,0 +1,36 @@ +import_metadata_json_schema = { + "type": "object", + "properties": { + "datasources": { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9_-]+$": { + "type": "object", + "properties": { + "datasource_name": { + "type": "string", + "pattern": "^[a-zA-Z0-9_-]+$" + }, + "clusters": { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9_-]+$": { + "type": "object", + "properties": { + "cluster_name": { + "type": "string", + "pattern": "^[a-zA-Z0-9_-]+$" + } + }, + "required": ["cluster_name"] + } + } + } + }, + "required": ["datasource_name", "clusters"] + } + } + } + }, + "required": ["datasources"] +} diff --git a/tests/scripts/helpers/import_metadata_json_validate.py b/tests/scripts/helpers/import_metadata_json_validate.py new file mode 100644 index 000000000..3772228ff --- /dev/null +++ b/tests/scripts/helpers/import_metadata_json_validate.py @@ -0,0 +1,68 @@ +""" +Copyright (c) 2023, 2023 Red Hat, IBM Corporation and others. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" +import json +import jsonschema +from jsonschema import FormatChecker +from jsonschema.exceptions import ValidationError +from helpers.import_metadata_json_schema import import_metadata_json_schema + +JSON_NULL_VALUES = ("is not of type 'string'", "is not of type 'integer'", "is not of type 'number'") +VALUE_MISSING = " cannot be empty or null!" + +def validate_import_metadata_json(import_metadata_json, json_schema): + errorMsg = "" + try: + # create a validator with the format checker + print("Validating json against the json schema...") + validator = jsonschema.Draft7Validator(json_schema, format_checker=FormatChecker()) + + # validate the JSON data against the schema + errors = "" + errors = list(validator.iter_errors(import_metadata_json)) + print("Validating json against the json schema...done") + errorMsg = validate_import_metadata_json_values(import_metadata_json) + + if errors: + custom_err = ValidationError(errorMsg) + errors.append(custom_err) + return errors + else: + return errorMsg + except ValidationError as err: + print("Received a VaidationError") + + # Check if the exception is due to empty or null required parameters and prepare the response accordingly + if any(word in err.message for word in JSON_NULL_VALUES): + errorMsg = "Parameters" + VALUE_MISSING + return errorMsg + # Modify the error response in case of additional properties error + elif str(err.message).__contains__('('): + errorMsg = str(err.message).split('(') + return errorMsg[0] + else: + return err.message + +def validate_import_metadata_json_values(metadata): + validationErrorMsg = "" + + for key in metadata.keys(): + + # Check if any of the key is empty or null + if not (str(metadata[key]) and str(metadata[key]).strip()): + validationErrorMsg = ",".join([validationErrorMsg, "Parameters" + VALUE_MISSING]) + + return validationErrorMsg.lstrip(',') + diff --git a/tests/scripts/helpers/kruize.py b/tests/scripts/helpers/kruize.py index 74fc89a3b..ab2bc5f83 100644 --- a/tests/scripts/helpers/kruize.py +++ b/tests/scripts/helpers/kruize.py @@ -239,3 +239,92 @@ def list_experiments(results=None, recommendations=None, latest=None, experiment response = requests.get(url) print("Response status code = ", response.status_code) return response + + +# Description: This function obtains the list of datasources from Kruize Autotune using datasources API +# Input Parameters: None +def list_datasources(name=None): + print("\nListing the datasources...") + query_params = {} + + if name is not None: + query_params['name'] = name + + query_string = "&".join(f"{key}={value}" for key, value in query_params.items()) + + url = URL + "/datasources" + if query_string: + url += "?" 
+ query_string + print("URL = ", url) + response = requests.get(url) + print("Response status code = ", response.status_code) + return response + + +# Description: This function validates the input json and imports metadata using POST dsmetadata API to Kruize Autotune +# Input Parameters: datasource input json +def import_metadata(input_json_file, invalid_header=False): + json_file = open(input_json_file, "r") + input_json = json.loads(json_file.read()) + print("\n************************************************************") + pretty_json_str = json.dumps(input_json, indent=4) + print(pretty_json_str) + print("\n************************************************************") + + # read the json + print("\nImporting the metadata...") + + url = URL + "/dsmetadata" + print("URL = ", url) + + headers = {'content-type': 'application/xml'} + if invalid_header: + print("Invalid header") + response = requests.post(url, json=input_json, headers=headers) + else: + response = requests.post(url, json=input_json) + + print("Response status code = ", response.status_code) + try: + # Parse the response content as JSON into a Python dictionary + response_json = response.json() + + # Check if the response_json is a valid JSON object or array + if isinstance(response_json, (dict, list)): + # Convert the response_json back to a JSON-formatted string with double quotes and pretty print it + pretty_response_json_str = json.dumps(response_json, indent=4) + + # Print the JSON string + print(pretty_response_json_str) + else: + print("Invalid JSON format in the response.") + print(response.text) # Print the response text as-is + except json.JSONDecodeError: + print("Response content is not valid JSON.") + print(response.text) # Print the response text as-is + return response + + +# Description: This function deletes the metadata and posts the metadata using dsmetadata API to Kruize Autotune +# Input Parameters: datasource input json +def delete_metadata(input_json_file, invalid_header=False): + json_file = open(input_json_file, "r") + input_json = json.loads(json_file.read()) + + print("\nDeleting the metadata...") + + url = URL + "/dsmetadata" + print("URL = ", url) + + datasource_name = input_json['datasource_name'] + + headers = {'content-type': 'application/xml'} + if invalid_header: + print("Invalid header") + response = requests.delete(url, json=input_json, headers=headers) + else: + response = requests.delete(url, json=input_json) + + print(response) + print("Response status code = ", response.status_code) + return response \ No newline at end of file diff --git a/tests/scripts/helpers/utils.py b/tests/scripts/helpers/utils.py index 4c00da192..7284b78b6 100644 --- a/tests/scripts/helpers/utils.py +++ b/tests/scripts/helpers/utils.py @@ -213,6 +213,12 @@ "memoryRSS_format": "MiB" } +# version, datasource_name +import_metadata_test_data = { + "version": "v1.0", + "datasource_name": "prometheus-1", +} + test_type = {"blank": "", "null": "null", "invalid": "xyz"} aggr_info_keys_to_skip = ["cpuRequest_sum", "cpuRequest_avg", "cpuLimit_sum", "cpuLimit_avg", "cpuUsage_sum", "cpuUsage_max", diff --git a/tests/scripts/local_monitoring_tests/json_files/import_metadata.json b/tests/scripts/local_monitoring_tests/json_files/import_metadata.json new file mode 100644 index 000000000..8a6ff0d0d --- /dev/null +++ b/tests/scripts/local_monitoring_tests/json_files/import_metadata.json @@ -0,0 +1,5 @@ +{ + "version": "v1.0", + "datasource_name": "prometheus-1" +} + diff --git 
a/tests/scripts/local_monitoring_tests/json_files/import_metadata_template.json b/tests/scripts/local_monitoring_tests/json_files/import_metadata_template.json new file mode 100644 index 000000000..f36ad6cf1 --- /dev/null +++ b/tests/scripts/local_monitoring_tests/json_files/import_metadata_template.json @@ -0,0 +1,4 @@ +{ + "version": "{{version}}", + "datasource_name": "{{datasource_name}}" +} diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_import_metadata.py b/tests/scripts/local_monitoring_tests/rest_apis/test_import_metadata.py new file mode 100644 index 000000000..171fcdb29 --- /dev/null +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_import_metadata.py @@ -0,0 +1,121 @@ +import pytest +import json +import sys + +sys.path.append("../../") + +from helpers.fixtures import * +from helpers.kruize import * +from helpers.utils import * +from helpers.import_metadata_json_validate import * +from jinja2 import Environment, FileSystemLoader + +mandatory_fields = [ + ("version", ERROR_STATUS_CODE, ERROR_STATUS), + ("datasource_name", ERROR_STATUS_CODE, ERROR_STATUS) +] + +csvfile = "/tmp/import_metadata_test_data.csv" + +@pytest.mark.sanity +def test_import_metadata(cluster_type): + """ + Test Description: This test validates the response status code of dsmetadata API by passing a + valid input for the json + """ + input_json_file = "../json_files/import_metadata.json" + + form_kruize_url(cluster_type) + + response = delete_metadata(input_json_file) + print("delete metadata = ", response.status_code) + + # Import metadata using the specified json + response = import_metadata(input_json_file) + metadata_json = response.json() + + # Validate the json against the json schema + errorMsg = validate_import_metadata_json(metadata_json, import_metadata_json_schema) + assert errorMsg == "" + + response = delete_metadata(input_json_file) + print("delete metadata = ", response.status_code) + + +@pytest.mark.negative +def test_import_metadata_with_invalid_header(cluster_type): + """ + Test Description: This test validates the importing of metadata by specifying invalid content type in the header + """ + + input_json_file = "../json_files/import_metadata.json" + + form_kruize_url(cluster_type) + + response = delete_metadata(input_json_file) + print("delete metadata = ", response.status_code) + + # Import metadata using the specified json + response = import_metadata(input_json_file, invalid_header=True) + + data = response.json() + #print(data['message']) + print("content type = ", response.headers["Content-Type"]) + + assert response.status_code == ERROR_STATUS_CODE + assert data['status'] == ERROR_STATUS + + response = delete_metadata(input_json_file) + print("delete metadata = ", response.status_code) + + +@pytest.mark.negative +@pytest.mark.parametrize( + "test_name, expected_status_code, version, datasource_name", + generate_test_data(csvfile, import_metadata_test_data, "import_metadata")) +def test_import_metadata_invalid_test(test_name, expected_status_code, version, datasource_name, cluster_type): + """ + Test Description: This test validates the response status code of POST dsmetadata API against + invalid input (blank, null, empty) for the json parameters.
+ """ + print("\n****************************************************") + print("Test - ", test_name) + print("****************************************************\n") + tmp_json_file = "/tmp/import_metadata_" + test_name + ".json" + + print("tmp_json_file = ", tmp_json_file) + + form_kruize_url(cluster_type) + + environment = Environment(loader=FileSystemLoader("../json_files/")) + template = environment.get_template("import_metadata_template.json") + if "null" in test_name: + field = test_name.replace("null_", "") + json_file = "../json_files/import_metadata_template.json" + filename = "/tmp/import_metadata_template.json" + + strip_double_quotes_for_field(json_file, field, filename) + environment = Environment(loader=FileSystemLoader("/tmp/")) + template = environment.get_template("import_metadata_template.json") + + content = template.render( + version=version, + datasource_name=datasource_name, + ) + with open(tmp_json_file, mode="w", encoding="utf-8") as message: + message.write(content) + + response = delete_metadata(tmp_json_file) + print("delete metadata = ", response.status_code) + + # Import metadata using the specified json + response = import_metadata(tmp_json_file) + metadata_json = response.json() + + print(metadata_json['message']) + + # temporarily moved this up to avoid failures in the subsequent tests + response_delete_metadata = delete_metadata(tmp_json_file) + print("delete metadata = ", response_delete_metadata.status_code) + + assert response.status_code == int(expected_status_code) \ No newline at end of file From f686a5af39045fa31fa619bab209e25f891ca12a Mon Sep 17 00:00:00 2001 From: Shreya Date: Fri, 3 May 2024 11:22:21 +0530 Subject: [PATCH 03/10] Add mandatory fields and duplicate import metadata test cases --- tests/scripts/helpers/kruize.py | 2 - .../json_files/import_metadata_mandatory.json | 4 + .../rest_apis/test_import_metadata.py | 80 ++++++++++++++++++- 3 files changed, 83 insertions(+), 3 deletions(-) create mode 100644 tests/scripts/local_monitoring_tests/json_files/import_metadata_mandatory.json diff --git a/tests/scripts/helpers/kruize.py b/tests/scripts/helpers/kruize.py index ab2bc5f83..6da537719 100644 --- a/tests/scripts/helpers/kruize.py +++ b/tests/scripts/helpers/kruize.py @@ -316,8 +316,6 @@ def delete_metadata(input_json_file, invalid_header=False): url = URL + "/dsmetadata" print("URL = ", url) - datasource_name = input_json['datasource_name'] - headers = {'content-type': 'application/xml'} if invalid_header: print("Invalid header") diff --git a/tests/scripts/local_monitoring_tests/json_files/import_metadata_mandatory.json b/tests/scripts/local_monitoring_tests/json_files/import_metadata_mandatory.json new file mode 100644 index 000000000..b1b5d9ef5 --- /dev/null +++ b/tests/scripts/local_monitoring_tests/json_files/import_metadata_mandatory.json @@ -0,0 +1,4 @@ +{ + "version": "v1.0", + "datasource_name": "prometheus-1" +} diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_import_metadata.py b/tests/scripts/local_monitoring_tests/rest_apis/test_import_metadata.py index 171fcdb29..499cd1b11 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_import_metadata.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_import_metadata.py @@ -118,4 +118,82 @@ def test_import_metadata_invalid_test(test_name, expected_status_code, version, response_delete_metadata = delete_metadata(tmp_json_file) print("delete metadata = ", response_delete_metadata.status_code) - assert response.status_code == int(expected_status_code) 
\ No newline at end of file + assert response.status_code == int(expected_status_code) + + +@pytest.mark.extended +@pytest.mark.parametrize("field, expected_status_code, expected_status", mandatory_fields) +def test_import_metadata_mandatory_fields(cluster_type, field, expected_status_code, expected_status): + form_kruize_url(cluster_type) + + # Import metadata using the specified json + json_file = "/tmp/import_metadata.json" + input_json_file = "../json_files/import_metadata_mandatory.json" + json_data = json.load(open(input_json_file)) + + if field == 'version': + json_data.pop("version", None) + else: + json_data.pop("datasource_name", None) + + print("\n*****************************************") + print(json_data) + print("*****************************************\n") + data = json.dumps(json_data) + with open(json_file, 'w') as file: + file.write(data) + + response = delete_metadata(json_file) + print("delete metadata = ", response.status_code) + + # Import metadata using the specified json + response = import_metadata(json_file) + metadata_json = response.json() + + assert response.status_code == expected_status_code, \ + f"Mandatory field check failed for {field} actual - {response.status_code} expected - {expected_status_code}" + assert metadata_json['status'] == expected_status + + response = delete_metadata(json_file) + print("delete metadata = ", response.status_code) + + +@pytest.mark.sanity +def test_duplicate_import_metadata(cluster_type): + """ + Test Description: This test validates the response status code of /dsmetadata API by specifying the + same datasource name + """ + input_json_file = "../json_files/import_metadata.json" + json_data = json.load(open(input_json_file)) + + datasource_name = json_data['datasource_name'] + print("datasource_name = ", datasource_name) + + form_kruize_url(cluster_type) + + response = delete_metadata(input_json_file) + print("delete metadata = ", response.status_code) + + # Import metadata using the specified json + response = import_metadata(input_json_file) + metadata_json = response.json() + + assert response.status_code == SUCCESS_STATUS_CODE + + # Validate the json against the json schema + errorMsg = validate_import_metadata_json(metadata_json, import_metadata_json_schema) + assert errorMsg == "" + + # Import metadata using the specified json + response = import_metadata(input_json_file) + metadata_json = response.json() + + assert response.status_code == SUCCESS_STATUS_CODE + + # Validate the json against the json schema + errorMsg = validate_import_metadata_json(metadata_json, import_metadata_json_schema) + assert errorMsg == "" + + response = delete_metadata(input_json_file) + print("delete metadata = ", response.status_code) \ No newline at end of file From f94524c5eaccb1761759c3b2edbd02e750dc1b69 Mon Sep 17 00:00:00 2001 From: Shreya Date: Fri, 3 May 2024 18:43:21 +0530 Subject: [PATCH 04/10] Add test cases for list datasources API --- tests/scripts/helpers/kruize.py | 5 ++ .../helpers/list_datasources_json_schema.py | 34 ++++++++ .../helpers/list_datasources_json_validate.py | 81 +++++++++++++++++++ .../rest_apis/test_list_datasources.py | 71 ++++++++++++++++ 4 files changed, 191 insertions(+) create mode 100644 tests/scripts/helpers/list_datasources_json_schema.py create mode 100644 tests/scripts/helpers/list_datasources_json_validate.py create mode 100644 tests/scripts/local_monitoring_tests/rest_apis/test_list_datasources.py diff --git a/tests/scripts/helpers/kruize.py b/tests/scripts/helpers/kruize.py index 
6da537719..029e6eaba 100644 --- a/tests/scripts/helpers/kruize.py +++ b/tests/scripts/helpers/kruize.py @@ -257,7 +257,12 @@ def list_datasources(name=None): url += "?" + query_string print("URL = ", url) response = requests.get(url) + + print("PARAMS = ", query_params) print("Response status code = ", response.status_code) + print("\n************************************************************") + print(response.text) + print("\n************************************************************") return response diff --git a/tests/scripts/helpers/list_datasources_json_schema.py b/tests/scripts/helpers/list_datasources_json_schema.py new file mode 100644 index 000000000..3b14c4069 --- /dev/null +++ b/tests/scripts/helpers/list_datasources_json_schema.py @@ -0,0 +1,34 @@ +list_datasources_json_schema = { + "type": "object", + "properties": { + "version": { + "type": "string" + }, + "datasources": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "provider": { + "type": "string" + }, + "serviceName": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "required": ["name", "provider", "serviceName", "namespace", "url"] + } + } + }, + "required": ["version", "datasources"] +} diff --git a/tests/scripts/helpers/list_datasources_json_validate.py b/tests/scripts/helpers/list_datasources_json_validate.py new file mode 100644 index 000000000..d5e538625 --- /dev/null +++ b/tests/scripts/helpers/list_datasources_json_validate.py @@ -0,0 +1,81 @@ +""" +Copyright (c) 2023, 2023 Red Hat, IBM Corporation and others. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" +import json +import jsonschema +from jsonschema import FormatChecker +from jsonschema.exceptions import ValidationError +from helpers.list_datasources_json_schema import list_datasources_json_schema + +#TODO - currently only the prometheus datasource provider is supported +DATASOURCE_TYPE_SUPPORTED = "prometheus" + +JSON_NULL_VALUES = ("is not of type 'string'", "is not of type 'integer'", "is not of type 'number'") +VALUE_MISSING = " cannot be empty or null!"
+ +def validate_list_datasources_json(list_datasources_json, json_schema): + errorMsg = "" + try: + # create a validator with the format checker + print("Validating json against the json schema...") + validator = jsonschema.Draft7Validator(json_schema, format_checker=FormatChecker()) + + # validate the JSON data against the schema + errors = "" + errors = list(validator.iter_errors(list_datasources_json)) + print("Validating json against the json schema...done") + errorMsg = validate_list_datasources_json_values(list_datasources_json) + + if errors: + custom_err = ValidationError(errorMsg) + errors.append(custom_err) + return errors + else: + return errorMsg + except ValidationError as err: + print("Received a ValidationError") + + # Check if the exception is due to empty or null required parameters and prepare the response accordingly + if any(word in err.message for word in JSON_NULL_VALUES): + errorMsg = "Parameters" + VALUE_MISSING + return errorMsg + # Modify the error response in case of additional properties error + elif str(err.message).__contains__('('): + errorMsg = str(err.message).split('(') + return errorMsg[0] + else: + return err.message + +def validate_list_datasources_json_values(list_datasources_json): + validationErrorMsg = "" + obj_arr = ["datasources"] + + for key in list_datasources_json.keys(): + + # Check if any of the keys is empty or null + if not (str(list_datasources_json[key]) and str(list_datasources_json[key]).strip()): + validationErrorMsg = ",".join([validationErrorMsg, "Parameters" + VALUE_MISSING]) + + for obj in obj_arr: + if obj == key: + for subkey in list_datasources_json[key][0].keys(): + # Check if any of the keys is empty or null + if not (str(list_datasources_json[key][0][subkey]) and str(list_datasources_json[key][0][subkey]).strip()): + print(f"FAILED - {str(list_datasources_json[key][0][subkey])} is empty or null") + validationErrorMsg = ",".join([validationErrorMsg, "Parameters" + VALUE_MISSING]) + elif str(subkey) == "provider" and str(list_datasources_json[key][0][subkey]) not in DATASOURCE_TYPE_SUPPORTED: + validationErrorMsg = ",".join([validationErrorMsg, DATASOURCE_TYPE_SUPPORTED]) + + return validationErrorMsg.lstrip(',') diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_list_datasources.py b/tests/scripts/local_monitoring_tests/rest_apis/test_list_datasources.py new file mode 100644 index 000000000..6b327b442 --- /dev/null +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_list_datasources.py @@ -0,0 +1,71 @@ +import pytest +import json +import sys + +sys.path.append("../../") + +from helpers.fixtures import * +from helpers.kruize import * +from helpers.utils import * +from helpers.list_datasources_json_validate import * + + +@pytest.mark.sanity +def test_list_datasources_without_parameters(cluster_type): + """ + Test Description: This test validates datasources API without parameters + """ + form_kruize_url(cluster_type) + + # Get the datasources name + datasource_name = None + response = list_datasources(datasource_name) + + list_datasources_json = response.json() + + assert response.status_code == SUCCESS_200_STATUS_CODE + + # Validate the json against the json schema + errorMsg = validate_list_datasources_json(list_datasources_json, list_datasources_json_schema) + assert errorMsg == "" + + +@pytest.mark.sanity +def test_list_datasources_with_name(cluster_type): + """ + Test Description: This test validates datasources API with 'name' parameter + """ + form_kruize_url(cluster_type) + + # Get the datasources name
datasource_name = "prometheus-1" + response = list_datasources(datasource_name) + + list_datasources_json = response.json() + + assert response.status_code == SUCCESS_200_STATUS_CODE + + # Validate the json against the json schema + errorMsg = validate_list_datasources_json(list_datasources_json, list_datasources_json_schema) + assert errorMsg == "" + + +@pytest.mark.negative +@pytest.mark.parametrize("datasource_name", ["", "null", "xyz"]) +def test_list_datasources_invalid_datasorce_name(datasource_name, cluster_type): + """ + Test Description: This test validates the response status code of list datasources API against + invalid input (blank, null, empty) for the json parameters. + """ + print("\n****************************************************") + print("Test datasource_name = ", datasource_name) + print("****************************************************\n") + + form_kruize_url(cluster_type) + + # Get the datasource name + name = datasource_name + response = list_datasources(name) + + assert response.status_code == ERROR_STATUS_CODE + From 0181060797dead0dafff36e699da905e36f58506 Mon Sep 17 00:00:00 2001 From: Shreya Date: Mon, 6 May 2024 18:17:52 +0530 Subject: [PATCH 05/10] Enable Kruize local flag in local monitoring testsuite --- tests/scripts/common/common_functions.sh | 18 +++++++++++++++++- .../local_monitoring_tests.sh | 5 ++++- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/tests/scripts/common/common_functions.sh b/tests/scripts/common/common_functions.sh index ddd040b88..36fde16e6 100755 --- a/tests/scripts/common/common_functions.sh +++ b/tests/scripts/common/common_functions.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright (c) 2020, 2021 Red Hat, IBM Corporation and others. +# Copyright (c) 2020, 2024 Red Hat, IBM Corporation and others. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -1823,3 +1823,19 @@ function create_performance_profile() { exit 1 fi } + +# +# "local" flag is turned off by default for now. This needs to be set to true. +# +function kruize_local_patch() { + CRC_DIR="./manifests/crc/default-db-included-installation" + KRUIZE_CRC_DEPLOY_MANIFEST_OPENSHIFT="${CRC_DIR}/openshift/kruize-crc-openshift.yaml" + KRUIZE_CRC_DEPLOY_MANIFEST_MINIKUBE="${CRC_DIR}/minikube/kruize-crc-minikube.yaml" + + + if [ ${cluster_type} == "minikube" ]; then + sed -i 's/"local": "false"/"local": "true"/' ${KRUIZE_CRC_DEPLOY_MANIFEST_MINIKUBE} + elif [ ${cluster_type} == "openshift" ]; then + sed -i 's/"local": "false"/"local": "true"/' ${KRUIZE_CRC_DEPLOY_MANIFEST_OPENSHIFT} + fi +} diff --git a/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh b/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh index 39fb56f7a..e49aa6cfc 100644 --- a/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh +++ b/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright (c) 2023, 2023 Red Hat, IBM Corporation and others. +# Copyright (c) 2024, 2024 Red Hat, IBM Corporation and others. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -57,6 +57,9 @@ function local_monitoring_tests() { mkdir -p ${TEST_SUITE_DIR} + # check for 'local' flag + kruize_local_patch + # Setup kruize if [ ${skip_setup} -eq 0 ]; then echo "Setting up kruize..." 

From 118cad1289863e37bf163ccc9cf4c1a2e781aabb Mon Sep 17 00:00:00 2001
From: Shreya
Date: Mon, 6 May 2024 18:20:36 +0530
Subject: [PATCH 06/10] Validate response error message

---
 tests/scripts/helpers/utils.py                                |  3 ++-
 .../local_monitoring_tests/rest_apis/test_import_metadata.py  |  2 --
 .../rest_apis/test_list_datasources.py                        |  5 ++++-
 3 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/tests/scripts/helpers/utils.py b/tests/scripts/helpers/utils.py
index 7284b78b6..6af52de2a 100644
--- a/tests/scripts/helpers/utils.py
+++ b/tests/scripts/helpers/utils.py
@@ -1,5 +1,5 @@
 """
-Copyright (c) 2022, 2022 Red Hat, IBM Corporation and others.
+Copyright (c) 2022, 2024 Red Hat, IBM Corporation and others.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -49,6 +49,7 @@
 COST_RECOMMENDATIONS_AVAILABLE = "Cost Recommendations Available"
 PERFORMANCE_RECOMMENDATIONS_AVAILABLE = "Performance Recommendations Available"
 CONTAINER_AND_EXPERIMENT_NAME = " for container : %s for experiment: %s.]"
+LIST_DATASOURCES_ERROR_MSG = "Given datasource name - \" %s \" either does not exist or is not valid"
 
 # Kruize Recommendations Notification codes
 NOTIFICATION_CODE_FOR_RECOMMENDATIONS_AVAILABLE = "111000"
diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_import_metadata.py b/tests/scripts/local_monitoring_tests/rest_apis/test_import_metadata.py
index 499cd1b11..59b49e368 100644
--- a/tests/scripts/local_monitoring_tests/rest_apis/test_import_metadata.py
+++ b/tests/scripts/local_monitoring_tests/rest_apis/test_import_metadata.py
@@ -112,8 +112,6 @@ def test_import_metadata_invalid_test(test_name, expected_status_code, version,
     response = import_metadata(tmp_json_file)
     metadata_json = response.json()
 
-    print(metadata_json['message'])
-
     # temporarily moved this up to avoid failures in the subsequent tests
     response_delete_metadata = delete_metadata(tmp_json_file)
     print("delete metadata = ", response_delete_metadata.status_code)
diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_list_datasources.py b/tests/scripts/local_monitoring_tests/rest_apis/test_list_datasources.py
index 6b327b442..42e947ceb 100644
--- a/tests/scripts/local_monitoring_tests/rest_apis/test_list_datasources.py
+++ b/tests/scripts/local_monitoring_tests/rest_apis/test_list_datasources.py
@@ -52,7 +52,7 @@ def test_list_datasources_with_name(cluster_type):
 
 @pytest.mark.negative
 @pytest.mark.parametrize("datasource_name", ["", "null", "xyz"])
-def test_list_datasources_invalid_datasorce_name(datasource_name, cluster_type):
+def test_list_datasources_invalid_datasource_name(datasource_name, cluster_type):
     """
     Test Description: This test validates the response status code of list datasources API against
     invalid input (blank, null, empty) for the json parameters.
@@ -67,5 +67,8 @@ def test_list_datasources_invalid_datasorce_name(datasource_name, cluster_type):
     name = datasource_name
     response = list_datasources(name)
 
+    list_datasources_json = response.json()
     assert response.status_code == ERROR_STATUS_CODE
+    assert list_datasources_json['message'] == LIST_DATASOURCES_ERROR_MSG % name
+
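A side note on the `LIST_DATASOURCES_ERROR_MSG` constant introduced in this patch: it is a printf-style template, and the new assertion substitutes the datasource name into it with Python's `%` operator. A minimal sketch of that expansion (the constant is copied from the utils.py hunk above; the sample name mirrors the parametrized "xyz" negative case):

```python
# Sketch of how the %-style error template expands; the constant is copied
# from the utils.py hunk above, the sample name mirrors the "xyz" case.
LIST_DATASOURCES_ERROR_MSG = "Given datasource name - \" %s \" either does not exist or is not valid"

name = "xyz"
expected = LIST_DATASOURCES_ERROR_MSG % name

assert expected == 'Given datasource name - " xyz " either does not exist or is not valid'
print(expected)
```
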

From a903874a5dc38e20dee6af99c516436fceb066b9 Mon Sep 17 00:00:00 2001
From: Shreya
Date: Wed, 8 May 2024 10:31:33 +0530
Subject: [PATCH 07/10] Update test documentation

---
 tests/README.md                                    | 16 ++++
 .../Local_monitoring_tests.md                      | 96 +++++++++++++++++++
 2 files changed, 112 insertions(+)
 create mode 100644 tests/scripts/local_monitoring_tests/Local_monitoring_tests.md

diff --git a/tests/README.md b/tests/README.md
index 99a47fe2d..d4d9a3d8a 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -143,6 +143,16 @@ To run the stress test refer the Stress test [README](/tests/scripts/remote_moni
 
 To run the fault tolerant test refer the [README](/tests/scripts/remote_monitoring_tests/fault_tolerant_tests.md)
 
+### Local monitoring tests
+
+Here we test Kruize [Local monitoring APIs](/design/KruizeLocalAPI.md).
+
+#### API tests
+
+The tests do the following:
+- Deploys kruize in non-CRD mode using the deploy script from the autotune repo
+- Validates the behaviour of list datasources, import metadata and list metadata APIs in various scenarios covering both positive and negative use cases.
+
 ## Supported Clusters
 - Minikube
 
@@ -204,6 +214,12 @@ To run remote monitoring tests,
 
 ```
 /tests/test_autotune.sh -c minikube -i kruize/autotune_operator:0.0.11_mvp --testsuite=remote_monitoring_tests --resultsdir=/home/results
 ```
 
+To run local monitoring tests,
+
+```
+/tests/test_autotune.sh -c minikube -i kruize/autotune_operator:0.0.21_mvp --testsuite=local_monitoring_tests --resultsdir=/home/results
+```
+
 ## How to test a specific autotune module?
diff --git a/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md b/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md
new file mode 100644
index 000000000..126552efb
--- /dev/null
+++ b/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md
@@ -0,0 +1,96 @@
+# **Kruize Local monitoring tests**
+
+Kruize Local monitoring tests validate the behaviour of [Kruize local monitoring APIs](/design/KruizeLocalAPI.md)
+in various positive and negative scenarios. These tests are developed using the pytest framework.
+
+## Tests description
+### **List Datasources API tests**
+
+Here are the test scenarios:
+- List all datasources
+- List datasources with name query parameter:
+  - /datasources?name=
+- List datasources with an invalid datasource name (empty, NULL or an invalid value)
+
+### **Import Metadata API tests**
+
+Here are the test scenarios:
+
+- Importing metadata for a valid datasource to the API.
+- Post an invalid header content type
+- Post the same datasource again
+- Test with invalid values such as blank, null or an invalid value for various keys in the dsmetadata input request json
+- Validate error messages when the mandatory fields are missing
+
+The above tests are developed using the pytest framework and are run using a shell script wrapper that does the following:
+- Deploys kruize in non-CRD mode using the [deploy script](https://github.com/kruize/autotune/blob/master/deploy.sh) from the autotune repo
+- Creates a resource optimization performance profile using the [createPerformanceProfile API](/design/PerformanceProfileAPI.md)
+- Runs the above tests using pytest
+
+## Prerequisites for running the tests:
+- Minikube setup or access to Openshift cluster
+- Tools like kubectl, oc, curl, jq, python
+- Various python modules: pytest, json, pytest-html, requests, jinja2
+  (these modules will be automatically installed while the test is run)
+
+## How to run the test?
+
+Use the below command to test :
+
+```
+/tests/test_autotune.sh -c minikube -r [location of benchmarks] [-i kruize image] [--tctype=functional] [--testmodule=Autotune module to be tested] [--testsuite=Group of tests that you want to perform] [--testcase=Particular test case that you want to test] [-n namespace] [--resultsdir=results directory] [--skipsetup]
+```
+
+Where values for test_autotune.sh are:
+
+```
+usage: test_autotune.sh [ -c ] : cluster type. Supported type - minikube
+        [ -i ] : optional. Kruize docker image to be used for testing, default - kruize/autotune_operator:test
+        [ -r ] : Location of benchmarks. Not required for local_monitoring_tests
+        [ --tctype ] : optional. Testcases type to run, default is functional (runs all functional tests)
+        [ --testmodule ]: Module to be tested. Use testmodule=help, to list the modules to be tested
+        [ --testsuite ] : Testsuite to run. Use testsuite=help, to list the supported testsuites
+        [ --testcase ] : Testcase to run. Use testcase=help along with the testsuite name to list the supported testcases in that testsuite
+        [ -n ] : optional. Namespace to deploy autotune
+        [ --resultsdir ] : optional. Results directory location, by default it creates the results directory in current working directory
+        [ --skipsetup ] : optional. Specifying this option skips the Kruize setup and performance profile creation in case of local_monitoring_tests
+
+Note: If you want to run a particular testcase then it is mandatory to specify the testsuite
+Test cases supported are sanity, negative, extended and test_e2e
+
+```
+
+To run all the local monitoring tests,
+
+```
+/tests/test_autotune.sh -c minikube --testsuite=local_monitoring_tests --resultsdir=/home/results
+```
+
+To run only the sanity local monitoring tests,
+
+```
+/tests/test_autotune.sh -c minikube --testsuite=local_monitoring_tests --testcase=sanity --resultsdir=/home/results
+```
+
+Local monitoring tests can also be run without using test_autotune.sh. To do this, follow the below steps:
+
+- Deploy Kruize using the deploy.sh from the kruize autotune repo
+- Create the performance profile by using the [createPerformanceProfile API](/design/PerformanceProfileAPI.md)
+- cd /tests/scripts/local_monitoring_tests
+- python3 -m pip install --user -r requirements.txt
+- cd rest_apis
+- To run all sanity tests
+```
+    pytest -m sanity --html=<dir>/report.html --cluster_type=<minikube|openshift>
+```
+- To run only sanity tests for List datasources API
+```
+    pytest -m sanity --html=<dir>/report.html test_list_datasources.py --cluster_type=<minikube|openshift>
+```
+- To run only a specific test within List datasources API
+```
+    pytest -s test_list_datasources.py::test_list_datasources_with_name --cluster_type=<minikube|openshift>
+```
+
+Note: You can check the report.html for the results as it provides better readability
+

From a34403ad9ae0988cbb70aa84d281178d6cd71c90 Mon Sep 17 00:00:00 2001
From: Shreya
Date: Wed, 8 May 2024 16:18:20 +0530
Subject: [PATCH 08/10] Update docs, add review fixes

---
 tests/README.md                                    | 10 +++++---
 .../Local_monitoring_tests.md                      |  2 +-
 .../local_monitoring_tests.sh                      |  2 +-
 .../rest_apis/test_import_metadata.py              | 17 ++++++++++++-
 .../rest_apis/test_list_datasources.py             | 25 +++++++++++++++++--
 .../Remote_monitoring_tests.md                     |  2 +-
 6 files changed, 48 insertions(+), 10 deletions(-)

diff --git a/tests/README.md b/tests/README.md
index d4d9a3d8a..c5fc345dd 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -149,12 +149,14 @@ Here we test Kruize [Local monitoring APIs](/design/KruizeLocalAPI.md).
 
 #### API tests
 
-The tests do the following:
-- Deploys kruize in non-CRD mode using the deploy script from the autotune repo
-- Validates the behaviour of list datasources, import metadata and list metadata APIs in various scenarios covering both positive and negative use cases.
+  The tests do the following:
+  - Deploys kruize in non-CRD mode using the deploy script from the autotune repo
+  - Validates the behaviour of list datasources, import metadata and list metadata APIs in various scenarios covering both positive and negative use cases.
+
+  For details, refer to this [doc](/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md)
 
 ## Supported Clusters
-- Minikube
+- Minikube, Openshift
 
 ## Prerequisites for running the tests:
diff --git a/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md b/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md
index 126552efb..ac61e9f40 100644
--- a/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md
+++ b/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md
@@ -44,7 +44,7 @@ Use the below command to test :
 Where values for test_autotune.sh are:
 
 ```
-usage: test_autotune.sh [ -c ] : cluster type. Supported type - minikube
+usage: test_autotune.sh [ -c ] : cluster type. Supported type - minikube, openshift. Default - minikube
         [ -i ] : optional. Kruize docker image to be used for testing, default - kruize/autotune_operator:test
         [ -r ] : Location of benchmarks. Not required for local_monitoring_tests
         [ --tctype ] : optional. Testcases type to run, default is functional (runs all functional tests)
diff --git a/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh b/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh
index e49aa6cfc..a76a2dd3d 100644
--- a/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh
+++ b/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh
@@ -42,7 +42,7 @@ function local_monitoring_tests() {
 	target="crc"
 	perf_profile_json="${LOCAL_MONITORING_TEST_DIR}/json_files/resource_optimization_openshift.json"
 
-	local_monitoring_tests=("test_e2e" "sanity" "extended" "negative")
+	local_monitoring_tests=("sanity" "extended" "negative")
 
 	# check if the test case is supported
 	if [ ! -z "${testcase}" ]; then
diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_import_metadata.py b/tests/scripts/local_monitoring_tests/rest_apis/test_import_metadata.py
index 59b49e368..17ca499b0 100644
--- a/tests/scripts/local_monitoring_tests/rest_apis/test_import_metadata.py
+++ b/tests/scripts/local_monitoring_tests/rest_apis/test_import_metadata.py
@@ -1,3 +1,18 @@
+"""
+Copyright (c) 2024, 2024 Red Hat, IBM Corporation and others.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
 import pytest
 import json
 import sys
@@ -157,7 +172,7 @@ def test_import_metadata_mandatory_fields(cluster_type, field, expected_status_c
 
 
 @pytest.mark.sanity
-def test_duplicate_import_metadata(cluster_type):
+def test_repeated_metadata_import(cluster_type):
     """
     Test Description: This test validates the response status code of /dsmetadata API by specifying the
     same datasource name
diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_list_datasources.py b/tests/scripts/local_monitoring_tests/rest_apis/test_list_datasources.py
index 42e947ceb..95ed0710a 100644
--- a/tests/scripts/local_monitoring_tests/rest_apis/test_list_datasources.py
+++ b/tests/scripts/local_monitoring_tests/rest_apis/test_list_datasources.py
@@ -1,3 +1,18 @@
+"""
+Copyright (c) 2024, 2024 Red Hat, IBM Corporation and others.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+""" import pytest import json import sys @@ -51,8 +66,14 @@ def test_list_datasources_with_name(cluster_type): @pytest.mark.negative -@pytest.mark.parametrize("datasource_name", ["", "null", "xyz"]) -def test_list_datasources_invalid_datasource_name(datasource_name, cluster_type): +@pytest.mark.parametrize("test_name, expected_status_code, datasource_name", + [ + ("blank_name", 400, ""), + ("null_name", 400, "null"), + ("invalid_name", 400, "xyz") + ] +) +def test_list_datasources_invalid_datasource_name(test_name, expected_status_code, datasource_name, cluster_type): """ Test Description: This test validates the response status code of list datasources API against invalid input (blank, null, empty) for the json parameters. diff --git a/tests/scripts/remote_monitoring_tests/Remote_monitoring_tests.md b/tests/scripts/remote_monitoring_tests/Remote_monitoring_tests.md index 0f2b27b11..7fd4aadfd 100644 --- a/tests/scripts/remote_monitoring_tests/Remote_monitoring_tests.md +++ b/tests/scripts/remote_monitoring_tests/Remote_monitoring_tests.md @@ -97,7 +97,7 @@ Use the below command to test : Where values for test_autotune.sh are: ``` -usage: test_autotune.sh [ -c ] : cluster type. Supported type - minikube +usage: test_autotune.sh [ -c ] : cluster type. Supported type - minikube, openshift. Default - minikube [ -i ] : optional. Kruize docker image to be used for testing, default - kruize/autotune_operator:test [ -r ] : Location of benchmarks. Not required for remote_monitoring_tests [ --tctype ] : optional. Testcases type to run, default is functional (runs all functional tests) From b77814c79f41f2c0071c3f34750dc72d9b822ae2 Mon Sep 17 00:00:00 2001 From: Shreya Date: Wed, 8 May 2024 16:24:01 +0530 Subject: [PATCH 09/10] Add copyright documentation --- .../rest_apis/test_create_experiment.py | 15 +++++++++++++++ .../rest_apis/test_e2e_workflow.py | 15 +++++++++++++++ .../rest_apis/test_list_recommendations.py | 15 +++++++++++++++ .../rest_apis/test_update_recommendations.py | 15 +++++++++++++++ .../rest_apis/test_update_results.py | 15 +++++++++++++++ 5 files changed, 75 insertions(+) diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_create_experiment.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_create_experiment.py index b4c06e5c8..a0825f02c 100644 --- a/tests/scripts/remote_monitoring_tests/rest_apis/test_create_experiment.py +++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_create_experiment.py @@ -1,3 +1,18 @@ +""" +Copyright (c) 2022, 2024 Red Hat, IBM Corporation and others. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" import pytest import sys sys.path.append("../../") diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_e2e_workflow.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_e2e_workflow.py index 92bee44a1..779857539 100644 --- a/tests/scripts/remote_monitoring_tests/rest_apis/test_e2e_workflow.py +++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_e2e_workflow.py @@ -1,3 +1,18 @@ +""" +Copyright (c) 2022, 2024 Red Hat, IBM Corporation and others. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" import copy import json diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_list_recommendations.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_list_recommendations.py index d72e96adb..c380a71d3 100644 --- a/tests/scripts/remote_monitoring_tests/rest_apis/test_list_recommendations.py +++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_list_recommendations.py @@ -1,3 +1,18 @@ +""" +Copyright (c) 2022, 2024 Red Hat, IBM Corporation and others. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" import datetime import json diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py index c7c58b2f3..43c147ee8 100644 --- a/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py +++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py @@ -1,3 +1,18 @@ +""" +Copyright (c) 2022, 2024 Red Hat, IBM Corporation and others. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" import pytest import sys sys.path.append("../../") diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_update_results.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_results.py index 0d105f167..44a77a98b 100644 --- a/tests/scripts/remote_monitoring_tests/rest_apis/test_update_results.py +++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_results.py @@ -1,3 +1,18 @@ +""" +Copyright (c) 2022, 2024 Red Hat, IBM Corporation and others. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
 import pytest
 import sys
 sys.path.append("../../")

From ad68b28dd5644483db89933922f027d4212ca201 Mon Sep 17 00:00:00 2001
From: Shreya
Date: Thu, 9 May 2024 15:47:54 +0530
Subject: [PATCH 10/10] Remove invalid header testcase

---
 .../Local_monitoring_tests.md              |  1 -
 .../rest_apis/test_import_metadata.py      | 27 ------------------
 2 files changed, 28 deletions(-)

diff --git a/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md b/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md
index ac61e9f40..7d23513bf 100644
--- a/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md
+++ b/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md
@@ -17,7 +17,6 @@ Here are the test scenarios:
 Here are the test scenarios:
 
 - Importing metadata for a valid datasource to the API.
-- Post an invalid header content type
 - Post the same datasource again
 - Test with invalid values such as blank, null or an invalid value for various keys in the dsmetadata input request json
diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_import_metadata.py b/tests/scripts/local_monitoring_tests/rest_apis/test_import_metadata.py
index 17ca499b0..b68627683 100644
--- a/tests/scripts/local_monitoring_tests/rest_apis/test_import_metadata.py
+++ b/tests/scripts/local_monitoring_tests/rest_apis/test_import_metadata.py
@@ -57,33 +57,6 @@ def test_import_metadata(cluster_type):
     print("delete metadata = ", response.status_code)
 
 
-@pytest.mark.negative
-def test_import_metadata_with_invalid_header(cluster_type):
-    """
-    Test Description: This test validates the importing of metadata by specifying invalid content type in the header
-    """
-
-    input_json_file = "../json_files/import_metadata.json"
-
-    form_kruize_url(cluster_type)
-
-    response = delete_metadata(input_json_file)
-    print("delete metadata = ", response.status_code)
-
-    # Import metadata using the specified json
-    response = import_metadata(input_json_file, invalid_header=True)
-
-    data = response.json()
-    #print(data['message'])
-    print("content type = ", response.headers["Content-Type"])
-
-    assert response.status_code == ERROR_STATUS_CODE
-    assert data['status'] == ERROR_STATUS
-
-    response = delete_metadata(input_json_file)
-    print("delete metadata = ", response.status_code)
-
-
 @pytest.mark.negative
 @pytest.mark.parametrize(
     "test_name, expected_status_code, version, datasource_name",
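To close, a note on how the `-m sanity` / `-m negative` selection used throughout the docs in this series works: `sanity`, `negative` and `extended` are ordinary pytest markers registered in pytest.ini, and the decorators on these tests opt each test into a subset. A minimal self-contained sketch (the test bodies are placeholders, not Kruize code):

```python
# Minimal sketch of pytest marker selection as used by this testsuite;
# the test bodies below are placeholders, not Kruize code.
import pytest


@pytest.mark.sanity
def test_sanity_example():
    # selected by: pytest -m sanity
    assert True


@pytest.mark.negative
@pytest.mark.parametrize("test_name, expected_status_code, datasource_name",
                         [
                             ("blank_name", 400, ""),
                             ("null_name", 400, "null"),
                             ("invalid_name", 400, "xyz")
                         ])
def test_negative_example(test_name, expected_status_code, datasource_name):
    # selected by: pytest -m negative; a real test would call the API and
    # compare the response status against expected_status_code
    assert expected_status_code == 400
```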