Commit 3aa0c47

Merge pull request #1187 from shreyabiradar07/local_monitoring_tests

Include Local monitoring tests for Datasource APIs

dinogun authored May 14, 2024
2 parents b615acd + ad68b28 commit 3aa0c47

Showing 41 changed files with 1,210 additions and 11 deletions.
20 changes: 19 additions & 1 deletion tests/README.md
@@ -143,8 +143,20 @@
To run the stress test, refer to the Stress test [README](/tests/scripts/remote_moni

To run the fault tolerant test, refer to the [README](/tests/scripts/remote_monitoring_tests/fault_tolerant_tests.md)

### Local monitoring tests

Here we test Kruize [Local monitoring APIs](/design/KruizeLocalAPI.md).

#### API tests

The tests do the following:
- Deploy Kruize in non-CRD mode using the deploy script from the autotune repo
- Validate the behaviour of the list datasources, import metadata and list metadata APIs in various scenarios covering both positive and negative use cases (a request-level sketch follows below)

For details, refer to this [doc](/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md)
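
A minimal sketch of the kind of request these tests drive, assuming Kruize is reachable at a placeholder host/port. The `/datasources` and `/dsmetadata` endpoints match the helpers added later in this PR; the payload fields and the expected rejection are illustrative assumptions, not documented contracts:

```
import requests

KRUIZE_URL = "http://127.0.0.1:8080"  # placeholder for the deployed Kruize service

# Positive case: list the configured datasources
response = requests.get(f"{KRUIZE_URL}/datasources")
print(response.status_code, response.text)

# Negative case: import metadata with an invalid content type, expecting rejection
response = requests.post(f"{KRUIZE_URL}/dsmetadata",
                         json={"version": "v1.0", "datasource_name": "prometheus-1"},
                         headers={"content-type": "application/xml"})
print(response.status_code, response.text)
```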

## Supported Clusters
-- Minikube
+- Minikube, Openshift

## Prerequisites for running the tests:

@@ -204,6 +216,12 @@
To run remote monitoring tests,

```
<AUTOTUNE_REPO>/tests/test_autotune.sh -c minikube -i kruize/autotune_operator:0.0.11_mvp --testsuite=remote_monitoring_tests --resultsdir=/home/results
```

To run local monitoring tests,

```
<AUTOTUNE_REPO>/tests/test_autotune.sh -c minikube -i kruize/autotune_operator:0.0.21_mvp --testsuite=local_monitoring_tests --resultsdir=/home/results
```

## How to test a specific autotune module?

To run the tests specific to an autotune module, use the "testmodule" option. For example, to run all the tests for the dependency analyzer module, execute the below command:
21 changes: 19 additions & 2 deletions tests/scripts/common/common_functions.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright (c) 2020, 2021 Red Hat, IBM Corporation and others.
+# Copyright (c) 2020, 2024 Red Hat, IBM Corporation and others.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -45,7 +45,8 @@
TEST_SUITE_ARRAY=("app_autotune_yaml_tests"
"autotune_id_tests"
"kruize_layer_id_tests"
"em_standalone_tests"
"remote_monitoring_tests")
"remote_monitoring_tests"
"local_monitoring_tests")

modify_kruize_layer_tests=("add_new_tunable"
"apply_null_tunable"
@@ -1822,3 +1823,19 @@
function create_performance_profile() {
exit 1
fi
}

#
# "local" flag is turned off by default for now. This needs to be set to true.
#
function kruize_local_patch() {
CRC_DIR="./manifests/crc/default-db-included-installation"
KRUIZE_CRC_DEPLOY_MANIFEST_OPENSHIFT="${CRC_DIR}/openshift/kruize-crc-openshift.yaml"
KRUIZE_CRC_DEPLOY_MANIFEST_MINIKUBE="${CRC_DIR}/minikube/kruize-crc-minikube.yaml"


if [ ${cluster_type} == "minikube" ]; then
sed -i 's/"local": "false"/"local": "true"/' ${KRUIZE_CRC_DEPLOY_MANIFEST_MINIKUBE}
elif [ ${cluster_type} == "openshift" ]; then
sed -i 's/"local": "false"/"local": "true"/' ${KRUIZE_CRC_DEPLOY_MANIFEST_OPENSHIFT}
fi
}
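
For readers who don't want to parse the sed, a minimal Python sketch of the same substitution the patch performs, using the minikube manifest path from the function above (illustrative, not part of the test code):

```
from pathlib import Path

# Equivalent of: sed -i 's/"local": "false"/"local": "true"/' <manifest>
manifest = Path("./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml")
manifest.write_text(manifest.read_text().replace('"local": "false"', '"local": "true"'))
```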
1 change: 1 addition & 0 deletions tests/scripts/functional_tests.sh
@@ -32,6 +32,7 @@
SCRIPTS_DIR="${CURRENT_DIR}"
. ${SCRIPTS_DIR}/da/kruize_layer_id_tests.sh
. ${SCRIPTS_DIR}/em/em_standalone_tests.sh
. ${SCRIPTS_DIR}/remote_monitoring_tests/remote_monitoring_tests.sh
+. ${SCRIPTS_DIR}/local_monitoring_tests/local_monitoring_tests.sh

# Iterate through the commandline options
while getopts i:o:r:-: gopts
File renamed without changes.
File renamed without changes.
@@ -18,7 +18,7 @@
def generate_datasource_json(csv_file, json_file):
with open(json_file, 'w') as jsonfile:
json.dump(datasources, jsonfile, indent=4)

-csv_file_path = '../csv_data/datasources.csv'
-json_file_path = '../json_files/datasources.json'
+csv_file_path = '../local_monitoring_tests/csv_data/datasources.csv'
+json_file_path = '../local_monitoring_tests/json_files/datasources.json'

generate_datasource_json(csv_file_path, json_file_path)
@@ -31,7 +31,7 @@
def convert_date_format(input_date_str):
output_date_str = input_date.strftime("%Y-%m-%dT%H:%M:%S.000Z")
return output_date_str

-def create_exp_jsons(split = False, split_count = 1, exp_json_dir = "/tmp/exp_jsons", total_exps = 10):
+def create_exp_jsons(split = False, split_count = 1, exp_json_dir = "/tmp/exp_jsons", total_exps = 10, target_cluster="remote"):
complete_json_data = []
single_json_data = []
multi_json_data = []
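
The new target_cluster parameter defaults to "remote", so existing remote-monitoring callers are unaffected; a local-monitoring caller would presumably pass it explicitly. The argument values below are placeholders:

```
# Illustrative call; not taken from the test code
create_exp_jsons(split=False, split_count=1, exp_json_dir="/tmp/exp_jsons",
                 total_exps=10, target_cluster="local")
```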
36 changes: 36 additions & 0 deletions tests/scripts/helpers/import_metadata_json_schema.py
@@ -0,0 +1,36 @@
import_metadata_json_schema = {
"type": "object",
"properties": {
"datasources": {
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9_-]+$": {
"type": "object",
"properties": {
"datasource_name": {
"type": "string",
"pattern": "^[a-zA-Z0-9_-]+$"
},
"clusters": {
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9_-]+$": {
"type": "object",
"properties": {
"cluster_name": {
"type": "string",
"pattern": "^[a-zA-Z0-9_-]+$"
}
},
"required": ["cluster_name"]
}
}
}
},
"required": ["datasource_name", "clusters"]
}
}
}
},
"required": ["datasources"]
}
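
A quick self-check of this schema with the jsonschema package; the datasource and cluster names are invented for illustration:

```
from jsonschema import validate
from helpers.import_metadata_json_schema import import_metadata_json_schema

sample = {
    "datasources": {
        "prometheus-1": {
            "datasource_name": "prometheus-1",
            "clusters": {
                "default": {"cluster_name": "default"}
            }
        }
    }
}
validate(instance=sample, schema=import_metadata_json_schema)  # raises ValidationError on mismatch
```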
68 changes: 68 additions & 0 deletions tests/scripts/helpers/import_metadata_json_validate.py
@@ -0,0 +1,68 @@
"""
Copyright (c) 2023, 2023 Red Hat, IBM Corporation and others.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import jsonschema
from jsonschema import FormatChecker
from jsonschema.exceptions import ValidationError
from helpers.import_metadata_json_schema import import_metadata_json_schema

JSON_NULL_VALUES = ("is not of type 'string'", "is not of type 'integer'", "is not of type 'number'")
VALUE_MISSING = " cannot be empty or null!"

def validate_import_metadata_json(import_metadata_json, json_schema):
errorMsg = ""
try:
# create a validator with the format checker
print("Validating json against the json schema...")
validator = jsonschema.Draft7Validator(json_schema, format_checker=FormatChecker())

# validate the JSON data against the schema
errors = ""
errors = list(validator.iter_errors(import_metadata_json))
print("Validating json against the json schema...done")
errorMsg = validate_import_metadata_json_values(import_metadata_json)

if errors:
custom_err = ValidationError(errorMsg)
errors.append(custom_err)
return errors
else:
return errorMsg
except ValidationError as err:
print("Received a VaidationError")

# Check if the exception is due to empty or null required parameters and prepare the response accordingly
if any(word in err.message for word in JSON_NULL_VALUES):
errorMsg = "Parameters" + VALUE_MISSING
return errorMsg
# Modify the error response in case of additional properties error
elif str(err.message).__contains__('('):
errorMsg = str(err.message).split('(')
return errorMsg[0]
else:
return err.message

def validate_import_metadata_json_values(metadata):
validationErrorMsg = ""

for key in metadata.keys():

# Check if any of the keys are empty or null
if not (str(metadata[key]) and str(metadata[key]).strip()):
validationErrorMsg = ",".join([validationErrorMsg, "Parameters" + VALUE_MISSING])

return validationErrorMsg.lstrip(',')
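
A sketch of how this validator is meant to be called; the payload is the same illustrative one used for the schema example above:

```
from helpers.import_metadata_json_schema import import_metadata_json_schema

# Illustrative payload; datasource and cluster names are invented
metadata = {"datasources": {"prometheus-1": {"datasource_name": "prometheus-1",
                                             "clusters": {"default": {"cluster_name": "default"}}}}}
errors = validate_import_metadata_json(metadata, import_metadata_json_schema)
print(errors if errors else "metadata json is valid")
```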

@@ -239,3 +239,95 @@
def list_experiments(results=None, recommendations=None, latest=None, experiment
response = requests.get(url)
print("Response status code = ", response.status_code)
return response


# Description: This function obtains the list of datasources from Kruize Autotune using the datasources API
# Input Parameters: optional datasource name to filter on
def list_datasources(name=None):
print("\nListing the datasources...")
query_params = {}

if name is not None:
query_params['name'] = name

query_string = "&".join(f"{key}={value}" for key, value in query_params.items())

url = URL + "/datasources"
if query_string:
url += "?" + query_string
print("URL = ", url)
response = requests.get(url)

print("PARAMS = ", query_params)
print("Response status code = ", response.status_code)
print("\n************************************************************")
print(response.text)
print("\n************************************************************")
return response


# Description: This function validates the input json and imports metadata to Kruize Autotune using the POST /dsmetadata API
# Input Parameters: datasource input json file, optional invalid_header flag
def import_metadata(input_json_file, invalid_header=False):
json_file = open(input_json_file, "r")
input_json = json.loads(json_file.read())
print("\n************************************************************")
pretty_json_str = json.dumps(input_json, indent=4)
print(pretty_json_str)
print("\n************************************************************")

# read the json
print("\nImporting the metadata...")

url = URL + "/dsmetadata"
print("URL = ", url)

headers = {'content-type': 'application/xml'}
if invalid_header:
print("Invalid header")
response = requests.post(url, json=input_json, headers=headers)
else:
response = requests.post(url, json=input_json)

print("Response status code = ", response.status_code)
try:
# Parse the response content as JSON into a Python dictionary
response_json = response.json()

# Check if the response_json is a valid JSON object or array
if isinstance(response_json, (dict, list)):
# Convert the response_json back to a JSON-formatted string with double quotes and pretty print it
pretty_response_json_str = json.dumps(response_json, indent=4)

# Print the JSON string
print(pretty_response_json_str)
else:
print("Invalid JSON format in the response.")
print(response.text) # Print the response text as-is
except json.JSONDecodeError:
print("Response content is not valid JSON.")
print(response.text) # Print the response text as-is
return response


# Description: This function deletes the metadata from Kruize Autotune using the DELETE /dsmetadata API
# Input Parameters: datasource input json file, optional invalid_header flag
def delete_metadata(input_json_file, invalid_header=False):
json_file = open(input_json_file, "r")
input_json = json.loads(json_file.read())

print("\nDeleting the metadata...")

url = URL + "/dsmetadata"
print("URL = ", url)

headers = {'content-type': 'application/xml'}
if invalid_header:
print("Invalid header")
response = requests.delete(url, json=input_json, headers=headers)
else:
response = requests.delete(url, json=input_json)

print(response)
print("Response status code = ", response.status_code)
return response
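
Putting the three helpers together, a hedged happy-path sketch; it assumes these helpers (and the module-level URL) are importable, the JSON file path is illustrative, and no particular status codes are asserted beyond the list call:

```
response = list_datasources()
assert response.status_code == 200

response = list_datasources(name="prometheus-1")  # name filter; the value is illustrative
assert response.status_code == 200

response = import_metadata("../json_files/import_metadata.json")  # illustrative path
print("import status:", response.status_code)

response = delete_metadata("../json_files/import_metadata.json")
print("delete status:", response.status_code)
```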
34 changes: 34 additions & 0 deletions tests/scripts/helpers/list_datasources_json_schema.py
@@ -0,0 +1,34 @@
list_datasources_json_schema = {
"type": "object",
"properties": {
"version": {
"type": "string"
},
"datasources": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"provider": {
"type": "string"
},
"serviceName": {
"type": "string"
},
"namespace": {
"type": "string"
},
"url": {
"type": "string",
"format": "uri"
}
},
"required": ["name", "provider", "serviceName", "namespace", "url"]
}
}
},
"required": ["version", "datasources"]
}
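
And a response shape that conforms to this schema, again checked with jsonschema; the field values are examples, not actual deployment defaults:

```
from jsonschema import validate
from helpers.list_datasources_json_schema import list_datasources_json_schema

example = {
    "version": "v1.0",
    "datasources": [{
        "name": "prometheus-1",
        "provider": "prometheus",
        "serviceName": "prometheus-k8s",
        "namespace": "monitoring",
        "url": "http://prometheus-k8s.monitoring.svc.cluster.local:9090"
    }]
}
validate(instance=example, schema=list_datasources_json_schema)  # passes for a well-formed response
```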