From 3d1884245ac66de556778c2c558b1c82e1801459 Mon Sep 17 00:00:00 2001
From: Samad Yar Khan
Date: Wed, 10 Apr 2024 14:19:13 +0530
Subject: [PATCH 1/8] Add API adapter for deployment with related incidents

---
 .../dora/api/resources/incident_resources.py | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/apiserver/dora/api/resources/incident_resources.py b/apiserver/dora/api/resources/incident_resources.py
index bf1a3a91..864bcf20 100644
--- a/apiserver/dora/api/resources/incident_resources.py
+++ b/apiserver/dora/api/resources/incident_resources.py
@@ -1,3 +1,6 @@
+from typing import Dict, List
+from dora.api.resources.deployment_resources import adapt_deployment
+from dora.service.deployments.models.models import Deployment
 from dora.store.models.incidents import Incident
 from dora.api.resources.core_resources import adapt_user_info
 
@@ -31,3 +34,17 @@ def adapt_incident(
         "summary": incident.meta.get("summary"),
         "incident_type": incident.incident_type.value,
     }
+
+
+def adapt_deployments_with_related_incidents(
+    deployment: Deployment,
+    deployment_incidents_map: Dict[Deployment, List[Incident]],
+    username_user_map: dict = None,
+):
+    deployment_response = adapt_deployment(deployment, username_user_map)
+    incidents = deployment_incidents_map.get(deployment, [])
+    incident_response = list(
+        map(lambda incident: adapt_incident(incident, username_user_map), incidents)
+    )
+    deployment_response["incidents"] = incident_response
+    return deployment_response

From c62f9da14a045bbaab1f524dc6ddf53636e14ced Mon Sep 17 00:00:00 2001
From: Samad Yar Khan
Date: Wed, 10 Apr 2024 14:19:39 +0530
Subject: [PATCH 2/8] Add string util for uuid

---
 apiserver/dora/utils/string.py | 5 +++++
 1 file changed, 5 insertions(+)
 create mode 100644 apiserver/dora/utils/string.py

diff --git a/apiserver/dora/utils/string.py b/apiserver/dora/utils/string.py
new file mode 100644
index 00000000..a56caf06
--- /dev/null
+++ b/apiserver/dora/utils/string.py
@@ -0,0 +1,5 @@
+from uuid import uuid4
+
+
+def uuid4_str():
+    return str(uuid4())
From 009e0cbce3d8d403e4c3cfbc9961a13e6c8f8fbb Mon Sep 17 00:00:00 2001
From: Samad Yar Khan
Date: Wed, 10 Apr 2024 14:20:15 +0530
Subject: [PATCH 3/8] Update IncidentsRepoService to add query for fetching team incidents

---
 apiserver/dora/store/repos/incidents.py | 35 ++++++++++++++++++++-----
 1 file changed, 29 insertions(+), 6 deletions(-)

diff --git a/apiserver/dora/store/repos/incidents.py b/apiserver/dora/store/repos/incidents.py
index f83f8d66..d835f5dc 100644
--- a/apiserver/dora/store/repos/incidents.py
+++ b/apiserver/dora/store/repos/incidents.py
@@ -1,11 +1,12 @@
 from typing import List
-from dora.store.models.incidents.enums import IncidentType
+from sqlalchemy import and_
 from dora.store import rollback_on_exc, session
 from dora.store.models.incidents import (
     Incident,
     IncidentFilter,
     IncidentOrgIncidentServiceMap,
     TeamIncidentService,
+    IncidentStatus,
 )
 from dora.utils.time import Interval
 
@@ -21,6 +22,32 @@ def _apply_incident_filter(self, query, incident_filter: IncidentFilter = None)
     def get_resolved_team_incidents(
         self, team_id: str, interval: Interval, incident_filter: IncidentFilter = None
     ) -> List[Incident]:
+        query = self._get_team_incidents_query(team_id, incident_filter)
+
+        query = query.filter(
+            and_(
+                Incident.status == IncidentStatus.RESOLVED.value,
+                Incident.resolved_date.between(interval.from_time, interval.to_time),
+            )
+        )
+
+        return query.all()
+
+    @rollback_on_exc
+    def get_team_incidents(
+        self, team_id: str, interval: Interval, incident_filter: IncidentFilter = None
+    ) -> List[Incident]:
+        query = self._get_team_incidents_query(team_id, incident_filter)
+
+        query = query.filter(
+            Incident.creation_date.between(interval.from_time, interval.to_time),
+        )
+
+        return query.all()
+
+    def _get_team_incidents_query(
+        self, team_id: str, incident_filter: IncidentFilter = None
+    ):
         query = (
             session.query(Incident)
             .join(
@@ -38,9 +65,5 @@ def get_resolved_team_incidents(
         )
 
         query = self._apply_incident_filter(query, incident_filter)
-        query = query.filter(Incident.incident_type == IncidentType.ALERT)
-        query = query.filter(
-            Incident.resolved_date.between(interval.from_time, interval.to_time),
-        )
 
-        return query.order_by(Incident.creation_date.asc()).all()
+        return query.order_by(Incident.creation_date.asc())

From 17a678744484dbd742a4161664f3888043d5013d Mon Sep 17 00:00:00 2001
From: Samad Yar Khan
Date: Wed, 10 Apr 2024 14:20:48 +0530
Subject: [PATCH 4/8] Add incident getter and deployment incident mapper in IncidentService

---
 apiserver/dora/service/incidents/incidents.py | 55 ++++++++++++++++++-
 1 file changed, 54 insertions(+), 1 deletion(-)

diff --git a/apiserver/dora/service/incidents/incidents.py b/apiserver/dora/service/incidents/incidents.py
index 62e7b74a..475c0628 100644
--- a/apiserver/dora/service/incidents/incidents.py
+++ b/apiserver/dora/service/incidents/incidents.py
@@ -1,4 +1,6 @@
-from typing import List
+from collections import defaultdict
+from typing import List, Dict
+from dora.service.deployments.models.models import Deployment
 from dora.service.incidents.incident_filter import apply_incident_filter
 from dora.store.models.incidents.filter import IncidentFilter
 from dora.store.models.settings import EntityType, SettingType
@@ -36,6 +38,57 @@ def get_resolved_team_incidents(
             team_id, interval, incident_filter
         )
 
+    def get_team_incidents(self, team_id: str, interval: Interval) -> List[Incident]:
+        incident_filter: IncidentFilter = apply_incident_filter(
+            entity_type=EntityType.TEAM,
+            entity_id=team_id,
+            setting_types=[
+                SettingType.INCIDENT_SETTING,
+                SettingType.INCIDENT_TYPES_SETTING,
+            ],
+        )
+        return self._incidents_repo_service.get_team_incidents(
+            team_id, interval, incident_filter
+        )
+
+    def get_deployment_incidents_map(
+        self, deployments: List[Deployment], incidents: List[Incident]
+    ):
+        deployments = sorted(deployments, key=lambda x: x.conducted_at)
+        incidents = sorted(incidents, key=lambda x: x.creation_date)
+        incidents_pointer = 0
+
+        deployment_incidents_map: Dict[Deployment, List[Incident]] = defaultdict(list)
+
+        for current_deployment, next_deployment in zip(
+            deployments, deployments[1:] + [None]
+        ):
+            current_deployment_incidents = []
+
+            if incidents_pointer >= len(incidents):
+                deployment_incidents_map[
+                    current_deployment
+                ] = current_deployment_incidents
+                continue
+
+            while incidents_pointer < len(incidents):
+                incident = incidents[incidents_pointer]
+
+                if incident.creation_date >= current_deployment.conducted_at and (
+                    next_deployment is None
+                    or incident.creation_date < next_deployment.conducted_at
+                ):
+                    current_deployment_incidents.append(incident)
+                    incidents_pointer += 1
+                elif incident.creation_date < current_deployment.conducted_at:
+                    incidents_pointer += 1
+                else:
+                    break
+
+            deployment_incidents_map[current_deployment] = current_deployment_incidents
+
+        return deployment_incidents_map
+
 
 def get_incident_service():
     return IncidentService(IncidentsRepoService(), get_settings_service())
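
The bucketing rule implemented by get_deployment_incidents_map in PATCH 4/8 is: walk deployments and incidents in chronological order and attach each incident to the most recent deployment conducted at or before the incident's creation_date; incidents created before the first deployment are dropped. A standalone sketch of that rule, with plain datetimes standing in for the Deployment and Incident models (illustrative only, not part of the patch):

    # Illustrative restatement of the bucketing rule; plain datetimes stand in for
    # Deployment.conducted_at and Incident.creation_date.
    from datetime import datetime, timedelta

    now = datetime(2024, 4, 10, 12, 0)
    deployments = [now - timedelta(days=2), now - timedelta(hours=6)]
    incidents = [now - timedelta(days=3), now - timedelta(days=1), now - timedelta(hours=1)]

    buckets = {deployed_at: [] for deployed_at in deployments}
    for created_at in incidents:
        # Attach the incident to the latest deployment at or before its creation time.
        earlier = [deployed_at for deployed_at in deployments if deployed_at <= created_at]
        if earlier:
            buckets[max(earlier)].append(created_at)

    # The 3-day-old incident predates both deployments and is dropped; the 1-day-old
    # incident maps to the 2-day-old deployment; the 1-hour-old incident maps to the
    # 6-hour-old deployment.
    print(buckets)
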
From 68aa6eba1f7dc4128e774f882a647f8d2e1e5b82 Mon Sep 17 00:00:00 2001
From: Samad Yar Khan
Date: Wed, 10 Apr 2024 14:21:30 +0530
Subject: [PATCH 5/8] Add method to get all team deployments in an interval

---
 .../service/deployments/deployment_service.py | 37 +++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/apiserver/dora/service/deployments/deployment_service.py b/apiserver/dora/service/deployments/deployment_service.py
index 7e0cacf2..4278615c 100644
--- a/apiserver/dora/service/deployments/deployment_service.py
+++ b/apiserver/dora/service/deployments/deployment_service.py
@@ -90,6 +90,40 @@ def get_filtered_team_repos_with_workflow_configured_deployments(
 
         return team_repos_with_workflow_deployments
 
+    def get_team_all_deployments_in_interval(
+        self,
+        team_id: str,
+        interval,
+        pr_filter: PRFilter = None,
+        workflow_filter: WorkflowFilter = None,
+    ) -> List[Deployment]:
+
+        team_repos = self._get_team_repos_by_team_id(team_id)
+        (
+            team_repos_using_workflow_deployments,
+            team_repos_using_pr_deployments,
+        ) = self.get_filtered_team_repos_by_deployment_config(team_repos)
+
+        deployments_using_workflow = self.workflow_based_deployments_service.get_repos_all_deployments_in_interval(
+            self._get_repo_ids_from_team_repos(team_repos_using_workflow_deployments),
+            interval,
+            workflow_filter,
+        )
+        deployments_using_pr = (
+            self.pr_based_deployments_service.get_repos_all_deployments_in_interval(
+                self._get_repo_ids_from_team_repos(team_repos_using_pr_deployments),
+                interval,
+                pr_filter,
+            )
+        )
+
+        deployments: List[Deployment] = (
+            deployments_using_workflow + deployments_using_pr
+        )
+        sorted_deployments = self._sort_deployments_by_date(deployments)
+
+        return sorted_deployments
+
     def _get_team_repos_by_team_id(self, team_id: str) -> List[TeamRepos]:
         return self.code_repo_service.get_active_team_repos_by_team_id(team_id)
 
@@ -99,6 +133,9 @@ def _get_repo_ids_from_team_repos(self, team_repos: List[TeamRepos]) -> List[str
     def get_filtered_team_repos_by_deployment_config(
         self, team_repos: List[TeamRepos]
     ) -> Tuple[List[TeamRepos], List[TeamRepos]]:
+        """
+        Splits the input TeamRepos list into two lists: TeamRepos using workflow deployments and TeamRepos using PR deployments.
+        """
         return self._filter_team_repos_using_workflow_deployments(
             team_repos
         ), self._filter_team_repos_using_pr_deployments(team_repos)
From bafda7783bce7be78e4f6d525b214a3a3a6a926b Mon Sep 17 00:00:00 2001
From: Samad Yar Khan
Date: Wed, 10 Apr 2024 14:22:19 +0530
Subject: [PATCH 6/8] Add incidents test factory

---
 apiserver/tests/factories/models/__init__.py  |  2 +
 apiserver/tests/factories/models/incidents.py | 40 +++++++++++++++++++
 2 files changed, 42 insertions(+)
 create mode 100644 apiserver/tests/factories/models/incidents.py

diff --git a/apiserver/tests/factories/models/__init__.py b/apiserver/tests/factories/models/__init__.py
index e69de29b..69062ee3 100644
--- a/apiserver/tests/factories/models/__init__.py
+++ b/apiserver/tests/factories/models/__init__.py
@@ -0,0 +1,2 @@
+from .code import get_repo_workflow_run
+from .incidents import get_incident
diff --git a/apiserver/tests/factories/models/incidents.py b/apiserver/tests/factories/models/incidents.py
new file mode 100644
index 00000000..6997dc43
--- /dev/null
+++ b/apiserver/tests/factories/models/incidents.py
@@ -0,0 +1,40 @@
+from typing import List
+from datetime import datetime
+from dora.store.models.incidents.incidents import Incident
+from dora.utils.string import uuid4_str
+
+from dora.utils.time import time_now
+
+
+def get_incident(
+    id: str = uuid4_str(),
+    provider: str = "provider",
+    key: str = "key",
+    title: str = "title",
+    status: str = "status",
+    incident_number: int = 0,
+    creation_date: datetime = time_now(),
+    created_at: datetime = time_now(),
+    updated_at: datetime = time_now(),
+    resolved_date: datetime = time_now(),
+    acknowledged_date: datetime = time_now(),
+    assigned_to: str = "assigned_to",
+    assignees: List[str] = [],
+    meta: dict = {},
+) -> Incident:
+    return Incident(
+        id=id,
+        provider=provider,
+        key=key,
+        title=title,
+        status=status,
+        incident_number=incident_number,
+        created_at=created_at,
+        updated_at=updated_at,
+        creation_date=creation_date,
+        resolved_date=resolved_date,
+        assigned_to=assigned_to,
+        assignees=assignees,
+        acknowledged_date=acknowledged_date,
+        meta=meta,
+    )
From ebb5d98117ebc523b1ae9a1e668ecb610f3105c6 Mon Sep 17 00:00:00 2001
From: Samad Yar Khan
Date: Wed, 10 Apr 2024 14:22:40 +0530
Subject: [PATCH 7/8] Add tests for get_deployment_incidents_map

---
 .../test_deployment_incident_mapper.py | 119 ++++++++++++++++++
 1 file changed, 119 insertions(+)
 create mode 100644 apiserver/tests/service/Incidents/test_deployment_incident_mapper.py

diff --git a/apiserver/tests/service/Incidents/test_deployment_incident_mapper.py b/apiserver/tests/service/Incidents/test_deployment_incident_mapper.py
new file mode 100644
index 00000000..e17a6619
--- /dev/null
+++ b/apiserver/tests/service/Incidents/test_deployment_incident_mapper.py
@@ -0,0 +1,119 @@
+from datetime import timedelta
+from dora.service.incidents.incidents import get_incident_service
+from dora.utils.time import time_now
+
+from tests.factories.models import get_incident, get_repo_workflow_run
+
+
+# No incidents, no deployments
+def test_get_deployment_incidents_count_map_returns_empty_dict_when_given_no_incidents_no_deployments():
+    incident_service = get_incident_service()
+    incidents = []
+    deployments = []
+    deployment_incidents_count_map = incident_service.get_deployment_incidents_map(
+        deployments,
+        incidents,
+    )
+    assert deployment_incidents_count_map == {}
+
+
+# No incidents, some deployments
+def test_get_deployment_incidents_count_map_returns_deployment_incident_count_map_when_given_no_incidents_some_deployments():
+    incident_service = get_incident_service()
+    incidents = []
+    deployments = [
+        get_repo_workflow_run(conducted_at=time_now() - timedelta(days=2)),
+        get_repo_workflow_run(conducted_at=time_now() - timedelta(hours=6)),
+    ]
+    deployment_incidents_count_map = incident_service.get_deployment_incidents_map(
+        deployments,
+        incidents,
+    )
+    assert deployment_incidents_count_map == {deployments[0]: [], deployments[1]: []}
+
+
+# Some incidents, no deployments
+def test_get_deployment_incidents_count_map_returns_empty_dict_when_given_some_incidents_no_deployments():
+    incident_service = get_incident_service()
+    incidents = [get_incident(creation_date=time_now() - timedelta(days=3))]
+    deployments = []
+    deployment_incidents_count_map = incident_service.get_deployment_incidents_map(
+        deployments, incidents
+    )
+    assert deployment_incidents_count_map == {}
+
+
+# One incident between two deployments
+def test_get_deployment_incidents_count_map_returns_deployment_incident_count_map_when_given_one_incidents_bw_two_deployments():
+    incident_service = get_incident_service()
+    incidents = [get_incident(creation_date=time_now() - timedelta(days=1))]
+    deployments = [
+        get_repo_workflow_run(conducted_at=time_now() - timedelta(days=2)),
+        get_repo_workflow_run(conducted_at=time_now() - timedelta(hours=6)),
+    ]
+    deployment_incidents_count_map = incident_service.get_deployment_incidents_map(
+        deployments, incidents
+    )
+    assert deployment_incidents_count_map == {
+        deployments[0]: [incidents[0]],
+        deployments[1]: [],
+    }
+
+
+# One incident before two deployments
+def test_get_deployment_incidents_count_map_returns_deployment_incident_count_map_when_given_one_incidents_bef_two_deployments():
+    incident_service = get_incident_service()
+    incidents = [get_incident(creation_date=time_now() - timedelta(days=3))]
+    deployments = [
+        get_repo_workflow_run(conducted_at=time_now() - timedelta(days=2)),
+        get_repo_workflow_run(conducted_at=time_now() - timedelta(hours=6)),
+    ]
+    deployment_incidents_count_map = incident_service.get_deployment_incidents_map(
+        deployments, incidents
+    )
+    assert deployment_incidents_count_map == {deployments[0]: [], deployments[1]: []}
+
+
+# One incident after two deployments
+def test_get_deployment_incidents_count_map_returns_deployment_incident_count_map_when_given_one_incidents_after_two_deployments():
+    incident_service = get_incident_service()
+    incidents = [get_incident(creation_date=time_now() - timedelta(hours=1))]
+    deployments = [
+        get_repo_workflow_run(conducted_at=time_now() - timedelta(days=2)),
+        get_repo_workflow_run(conducted_at=time_now() - timedelta(hours=6)),
+    ]
+    deployment_incidents_count_map = incident_service.get_deployment_incidents_map(
+        deployments, incidents
+    )
+    assert deployment_incidents_count_map == {
+        deployments[0]: [],
+        deployments[1]: [incidents[0]],
+    }
+
+
+# Multiple incidents and deployments
+def test_get_deployment_incidents_count_map_returns_deployment_incident_count_map_when_given_multi_incidents_multi_deployments():
+    incident_service = get_incident_service()
+    incidents = [
+        get_incident(creation_date=time_now() - timedelta(days=5)),
+        get_incident(creation_date=time_now() - timedelta(days=3)),
+        get_incident(creation_date=time_now() - timedelta(hours=20)),
+        get_incident(creation_date=time_now() - timedelta(hours=1)),
+    ]
+    deployments = [
+        get_repo_workflow_run(conducted_at=time_now() - timedelta(days=7)),
+        get_repo_workflow_run(conducted_at=time_now() - timedelta(days=6)),
+        get_repo_workflow_run(conducted_at=time_now() - timedelta(days=4)),
+        get_repo_workflow_run(conducted_at=time_now() - timedelta(days=2)),
+        get_repo_workflow_run(conducted_at=time_now() - timedelta(hours=6)),
+    ]
+    deployment_incidents_count_map = incident_service.get_deployment_incidents_map(
+        deployments, incidents
+    )
+    assert deployment_incidents_count_map == {
+        deployments[0]: [],
+        deployments[1]: [incidents[0]],
+        deployments[2]: [incidents[1]],
+        deployments[3]: [incidents[2]],
+        deployments[4]: [incidents[3]],
+    }
From 132c24da053131414dea25adf044f342d53ea8f2 Mon Sep 17 00:00:00 2001
From: Samad Yar Khan
Date: Wed, 10 Apr 2024 14:23:18 +0530
Subject: [PATCH 8/8] Add API for deployments with related incidents

---
 apiserver/dora/api/incidents.py | 62 +++++++++++++++++++++++++++++++--
 1 file changed, 59 insertions(+), 3 deletions(-)

diff --git a/apiserver/dora/api/incidents.py b/apiserver/dora/api/incidents.py
index 850ceb12..24ba4d74 100644
--- a/apiserver/dora/api/incidents.py
+++ b/apiserver/dora/api/incidents.py
@@ -1,13 +1,23 @@
+import json
 from typing import Dict, List
 from datetime import datetime
 
 from flask import Blueprint
-from voluptuous import Required, Schema, Coerce, All
+from voluptuous import Required, Schema, Coerce, All, Optional
 
+from dora.service.deployments.deployment_service import (
+    get_deployments_service,
+)
+from dora.service.deployments.models.models import Deployment
+from dora.store.models.code.workflows.filter import WorkflowFilter
+from dora.utils.time import Interval
 from dora.service.incidents.incidents import get_incident_service
-from dora.api.resources.incident_resources import adapt_incident
+from dora.api.resources.incident_resources import (
+    adapt_deployments_with_related_incidents,
+    adapt_incident,
+)
 from dora.store.models.incidents import Incident
-from dora.api.request_utils import queryschema
+from dora.api.request_utils import coerce_workflow_filter, queryschema
 from dora.service.query_validator import get_query_validator
 from dora.store.models import Users
 
@@ -38,3 +48,49 @@ def get_resolved_incidents(team_id: str, from_time: datetime, to_time: datetime
 
     # ToDo: Generate a user map
     return [adapt_incident(incident) for incident in resolved_incidents]
+
+
+@app.route("/teams/<team_id>/deployments_with_related_incidents", methods=["GET"])
+@queryschema(
+    Schema(
+        {
+            Required("from_time"): All(str, Coerce(datetime.fromisoformat)),
+            Required("to_time"): All(str, Coerce(datetime.fromisoformat)),
+            Optional("pr_filter"): All(str, Coerce(json.loads)),
+            Optional("workflow_filter"): All(str, Coerce(coerce_workflow_filter)),
+        }
+    ),
+)
+def get_deployments_with_related_incidents(
+    team_id: str,
+    from_time: datetime,
+    to_time: datetime,
+    pr_filter: dict = None,
+    workflow_filter: WorkflowFilter = None,
+):
+    query_validator = get_query_validator()
+    interval = Interval(from_time, to_time)
+    query_validator.team_validator(team_id)
+
+    deployments: List[
+        Deployment
+    ] = get_deployments_service().get_team_all_deployments_in_interval(
+        team_id, interval, pr_filter, workflow_filter
+    )
+
+    incident_service = get_incident_service()
+
+    incidents: List[Incident] = incident_service.get_team_incidents(team_id, interval)
+
+    deployment_incidents_map: Dict[
+        Deployment, List[Incident]
+    ] = incident_service.get_deployment_incidents_map(deployments, incidents)
+
+    return list(
+        map(
+            lambda deployment: adapt_deployments_with_related_incidents(
+                deployment, deployment_incidents_map
+            ),
+            deployments,
+        )
+    )
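
Taken together, PATCH 4/8, PATCH 5/8 and PATCH 8/8 wire the new endpoint up as: fetch all team deployments in the interval, fetch all team incidents in the same interval, bucket the incidents per deployment, then adapt each deployment together with its incidents. A minimal service-level sketch of that flow; it assumes a configured database session and an existing team id, neither of which is part of this series, and the team id and 30-day window below are placeholders:

    # Sketch only: mirrors what get_deployments_with_related_incidents does internally,
    # minus the Flask routing, query schema and JSON serialisation.
    from datetime import timedelta

    from dora.api.resources.incident_resources import adapt_deployments_with_related_incidents
    from dora.service.deployments.deployment_service import get_deployments_service
    from dora.service.incidents.incidents import get_incident_service
    from dora.utils.time import Interval, time_now

    team_id = "00000000-0000-0000-0000-000000000000"  # placeholder, not a real team
    interval = Interval(time_now() - timedelta(days=30), time_now())

    deployments = get_deployments_service().get_team_all_deployments_in_interval(
        team_id, interval
    )
    incident_service = get_incident_service()
    incidents = incident_service.get_team_incidents(team_id, interval)
    deployment_incidents_map = incident_service.get_deployment_incidents_map(
        deployments, incidents
    )

    # One adapted dict per deployment, each carrying its related "incidents" list;
    # this is the shape the endpoint returns.
    responses = [
        adapt_deployments_with_related_incidents(deployment, deployment_incidents_map)
        for deployment in deployments
    ]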