Merge pull request #1211 from shreyabiradar07/test_multiple_import
Test multiple metadata import without datasource connection
dinogun authored Jun 26, 2024
2 parents 848aef2 + c3a8191 commit 2c4518a
Showing 3 changed files with 176 additions and 1 deletion.
84 changes: 84 additions & 0 deletions tests/scripts/helpers/utils.py
@@ -55,6 +55,7 @@
LIST_METADATA_ERROR_MSG = ("Metadata for a given datasource - %s, cluster name - %s, namespace - %s "
"either does not exist or is not valid")
LIST_METADATA_DATASOURCE_NAME_CLUSTER_NAME_ERROR_MSG = "Metadata for a given datasource name - %s, cluster_name - %s either does not exist or is not valid"
IMPORT_METADATA_DATASOURCE_CONNECTION_FAILURE_MSG = "Metadata cannot be imported, datasource connection refused or timed out"

# Kruize Recommendations Notification codes
NOTIFICATION_CODE_FOR_RECOMMENDATIONS_AVAILABLE = "111000"
@@ -935,6 +936,89 @@ def delete_namespace(namespace_name):
else:
print(f"Exception deleting namespace: {e}")


def scale_deployment(namespace, deployment_name, replicas):
    """
    Scale a Kubernetes Deployment to the desired number of replicas.
    This function scales a specified Deployment in a given namespace to the specified number
    of replicas using the Kubernetes Python client library. It achieves this by creating
    a Scale object and using the AppsV1Api to update the Deployment's scale.
    Args:
    - namespace (str): The namespace of the Deployment.
    - deployment_name (str): The name of the Deployment.
    - replicas (int): The desired number of replicas.
    Returns:
        None
    """
    config.load_kube_config()  # Load kube config from default location

    # Create an API client
    apps_v1 = client.AppsV1Api()

    # Define the scale object
    scale = client.V1Scale(
        api_version='autoscaling/v1',
        kind='Scale',
        metadata=client.V1ObjectMeta(name=deployment_name, namespace=namespace),
        spec=client.V1ScaleSpec(replicas=replicas)
    )

    # Scale the deployment
    try:
        response = apps_v1.replace_namespaced_deployment_scale(
            name=deployment_name,
            namespace=namespace,
            body=scale
        )
        print(f"Deployment {deployment_name} scaled to {replicas} replicas successfully.")
    except client.exceptions.ApiException as e:
        print(f"Error scaling deployment {deployment_name}: {e}")


def scale_statefulset(namespace, statefulset_name, replicas):
    """
    Scale a Kubernetes StatefulSet to the desired number of replicas.
    This function scales a specified StatefulSet in a given namespace to the specified number
    of replicas using the Kubernetes Python client library. It achieves this by creating
    a Scale object and using the AppsV1Api to update the StatefulSet's scale.
    Args:
    - namespace (str): The namespace of the StatefulSet.
    - statefulset_name (str): The name of the StatefulSet.
    - replicas (int): The desired number of replicas.
    Returns:
        None
    """
    config.load_kube_config()  # Load kube config from default location

    # Create an API client
    apps_v1 = client.AppsV1Api()

    # Define the scale object
    scale = client.V1Scale(
        api_version='autoscaling/v1',
        kind='Scale',
        metadata=client.V1ObjectMeta(name=statefulset_name, namespace=namespace),
        spec=client.V1ScaleSpec(replicas=replicas)
    )

    # Scale the statefulset
    try:
        response = apps_v1.replace_namespaced_stateful_set_scale(
            name=statefulset_name,
            namespace=namespace,
            body=scale
        )
        print(f"StatefulSet {statefulset_name} scaled to {replicas} replicas successfully.")
    except client.exceptions.ApiException as e:
        print(f"Error scaling StatefulSet {statefulset_name}: {e}")


# validate duration_in_hours decimal precision
def validate_duration_in_hours_decimal_precision(duration_in_hours):
"""
@@ -20,6 +20,12 @@ Here are the test scenarios:
- Post the same datasource again
- Test with invalid values such as blank, null or an invalid value for various keys in the dsmetadata input request json
- Validate error messages when the mandatory fields are missing
- Repeated metadata imports for the same datasource, dynamically creating and deleting namespaces between the two
  metadata import actions and validating the newly created namespaces in the list metadata output after the second
  import metadata invocation
- Repeated metadata imports without a datasource connection, dynamically scaling the prometheus resources down to zero
  replicas so that import metadata fails with an error when the datasource cannot be connected, and verifying that
  list metadata still returns the metadata stored in the DB after the second import metadata invocation (see the
  sketch after this list)
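
A minimal sketch of the no-connection scenario, assuming the helper functions and constants used by the test suite (`import_metadata`, `list_metadata`, `scale_deployment`, `scale_statefulset`, and the status-code and error-message constants) and the OpenShift monitoring workload names; the actual test reads the datasource name from `import_metadata.json` and restores the original replica counts afterwards:

```python
import json

# First import succeeds while prometheus is reachable.
response = import_metadata("../json_files/import_metadata.json")
assert response.status_code == SUCCESS_STATUS_CODE

# Scale the prometheus workloads down to zero replicas so the datasource connection fails.
scale_deployment("openshift-monitoring", "prometheus-operator", 0)
scale_deployment("openshift-monitoring", "prometheus-adapter", 0)
scale_statefulset("openshift-monitoring", "prometheus-k8s", 0)

# The repeated import now fails with a datasource connection error.
response = import_metadata("../json_files/import_metadata.json")
assert response.status_code == ERROR_STATUS_CODE
assert response.json()['message'] == IMPORT_METADATA_DATASOURCE_CONNECTION_FAILURE_MSG

# List metadata still returns the metadata persisted in the DB from the first import.
datasource_name = json.load(open("../json_files/import_metadata.json"))['datasource_name']
response = list_metadata(datasource=datasource_name, cluster_name="default")
assert response.status_code == SUCCESS_200_STATUS_CODE
```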

### **List Metadata API tests**

@@ -148,7 +148,8 @@ def test_import_metadata_mandatory_fields(cluster_type, field, expected_status_c
def test_repeated_metadata_import(cluster_type):
    """
    Test Description: This test validates the response status code of /dsmetadata API by specifying the
    same datasource name
    same datasource name, creating and deleting namespaces between the two import metadata calls and validating the
    newly created namespaces in the list metadata output after the second import metadata invocation
    """
    input_json_file = "../json_files/import_metadata.json"
    json_data = json.load(open(input_json_file))
@@ -224,3 +225,87 @@ def test_repeated_metadata_import(cluster_type):

delete_namespace("repeated-metadata-import")
delete_namespace("local-monitoring-test")


@pytest.mark.negative
def test_repeated_metadata_import_without_datasource_connection(cluster_type):
    """
    Test Description: This test validates the response status code of the POST /dsmetadata API for repeated metadata
    imports with the same datasource name, bringing down the prometheus server instance to validate the behaviour of
    import metadata when the datasource cannot be connected and an error is returned, and additionally verifying that
    the list metadata output returns the metadata from the DB after the second import metadata invocation.
    """
    input_json_file = "../json_files/import_metadata.json"
    json_data = json.load(open(input_json_file))

    datasource_name = json_data['datasource_name']
    print("datasource_name = ", datasource_name)

    form_kruize_url(cluster_type)

    response = delete_metadata(input_json_file)
    print("delete metadata = ", response.status_code)

    # Import metadata using the specified json
    response = import_metadata(input_json_file)
    metadata_json = response.json()

    assert response.status_code == SUCCESS_STATUS_CODE

    # Validate the json against the json schema
    errorMsg = validate_import_metadata_json(metadata_json, import_metadata_json_schema)
    assert errorMsg == ""

    json_data = json.load(open(input_json_file))
    datasource = json_data['datasource_name']
    # Currently only the default cluster_name is supported by kruize
    cluster_name = "default"
    response = list_metadata(datasource=datasource, cluster_name=cluster_name)

    list_metadata_json = response.json()
    assert response.status_code == SUCCESS_200_STATUS_CODE

    if cluster_type == "minikube":
        namespace = "monitoring"
    elif cluster_type == "openshift":
        namespace = "openshift-monitoring"

    # Validate the json values
    import_metadata_json = read_json_data_from_file(input_json_file)
    validate_list_metadata_parameters(import_metadata_json, list_metadata_json, cluster_name=cluster_name, namespace=namespace)

    # Scale down the prometheus deployments and statefulset to zero replicas to bring down the prometheus datasource connection
    scale_deployment(namespace, "prometheus-operator", 0)
    scale_deployment(namespace, "prometheus-adapter", 0)
    scale_statefulset(namespace, "prometheus-k8s", 0)
    time.sleep(10)

    # Repeated import metadata using the specified json
    response = import_metadata(input_json_file)
    metadata_json = response.json()

    assert response.status_code == ERROR_STATUS_CODE
    assert metadata_json['message'] == IMPORT_METADATA_DATASOURCE_CONNECTION_FAILURE_MSG

    json_data = json.load(open(input_json_file))
    datasource = json_data['datasource_name']
    # Currently only the default cluster_name is supported by kruize
    cluster_name = "default"
    response = list_metadata(datasource=datasource, cluster_name=cluster_name)

    list_metadata_json = response.json()
    assert response.status_code == SUCCESS_200_STATUS_CODE

    # Validate the json values
    import_metadata_json = read_json_data_from_file(input_json_file)
    validate_list_metadata_parameters(import_metadata_json, list_metadata_json, cluster_name=cluster_name, namespace=namespace)

    # Cleanup: delete the imported metadata
    response = delete_metadata(input_json_file)
    print("delete metadata = ", response.status_code)

    scale_deployment(namespace, "prometheus-operator", 1)
    scale_deployment(namespace, "prometheus-adapter", 2)
    scale_statefulset(namespace, "prometheus-k8s", 2)
    time.sleep(90)
