From fd5e6b4f1b7ac2ad524080f4f8a04a5206e4c887 Mon Sep 17 00:00:00 2001
From: fabrizio-credo
Date: Thu, 27 Oct 2022 11:08:55 -0700
Subject: [PATCH 01/11] Added tags property to models

- Model tags logic
- Updated regression and classification with extra parameter
---
 credoai/artifacts/model/base_model.py | 14 +++++++++++++-
 credoai/artifacts/model/classification_model.py | 3 ++-
 credoai/artifacts/model/regression_model.py | 10 ++--------
 3 files changed, 17 insertions(+), 10 deletions(-)

diff --git a/credoai/artifacts/model/base_model.py b/credoai/artifacts/model/base_model.py
index b8cccca8..ee1756e1 100644
--- a/credoai/artifacts/model/base_model.py
+++ b/credoai/artifacts/model/base_model.py
@@ -1,6 +1,6 @@
 """Abstract class for model artifacts used by `Lens`"""
 from abc import ABC
-from typing import List
+from typing import List, Optional
 
 from credoai.utils import ValidationError
 from credoai.utils.model_utils import get_model_info
@@ -31,16 +31,28 @@ def __init__(
         necessary_functions: List[str],
         name: str,
         model_like,
+        tags: Optional[dict] = None,
     ):
 
         self.type = type
         self.name = name
         self.model_like = model_like
+        self.tags = tags
         self.model_info = get_model_info(model_like)
         self._validate(necessary_functions)
         self._build(possible_functions)
         self._update_functionality()
 
+    @property
+    def tags(self):
+        return self._tags
+
+    @tags.setter
+    def tags(self, value):
+        if not isinstance(value, dict) and value is not None:
+            raise ValidationError("Tags must be of type dictionary")
+        self._tags = value
+
     def _build(self, function_names: List[str]):
         """
         Makes the necessary methods available in the class
diff --git a/credoai/artifacts/model/classification_model.py b/credoai/artifacts/model/classification_model.py
index 03ee9234..a66c0161 100644
--- a/credoai/artifacts/model/classification_model.py
+++ b/credoai/artifacts/model/classification_model.py
@@ -20,13 +20,14 @@ class ClassificationModel(Model):
         the class labels probabilities for each sample.
     """
 
-    def __init__(self, name: str, model_like=None):
+    def __init__(self, name: str, model_like=None, tags=None):
         super().__init__(
             "Classification",
             ["predict", "predict_proba"],
             ["predict"],
             name,
             model_like,
+            tags,
         )
 
     def _update_functionality(self):
diff --git a/credoai/artifacts/model/regression_model.py b/credoai/artifacts/model/regression_model.py
index 908cb4a7..b423974f 100644
--- a/credoai/artifacts/model/regression_model.py
+++ b/credoai/artifacts/model/regression_model.py
@@ -20,14 +20,8 @@ class RegressionModel(Model):
         the class labels probabilities for each sample.
     """
 
-    def __init__(self, name: str, model_like=None):
-        super().__init__(
-            "Regression",
-            ["predict"],
-            ["predict"],
-            name,
-            model_like,
-        )
+    def __init__(self, name: str, model_like=None, tags=None):
+        super().__init__("Regression", ["predict"], ["predict"], name, model_like, tags)
 
 
 class DummyRegression:

From 76c6b4536a566869d13c8e3635355c9db6580bd5 Mon Sep 17 00:00:00 2001
From: fabrizio-credo
Date: Thu, 27 Oct 2022 14:38:47 -0700
Subject: [PATCH 02/11] Added tags

- Created tags property and setter in evidence_requirements
- Updated governance to extract unique_tags and display info about it
---
 credoai/evidence/evidence_requirement.py | 5 +++++
 credoai/governance/governance.py | 12 ++++++++++++
 2 files changed, 17 insertions(+)

diff --git a/credoai/evidence/evidence_requirement.py b/credoai/evidence/evidence_requirement.py
index 5b6fe54b..8109ed11 100644
--- a/credoai/evidence/evidence_requirement.py
+++ b/credoai/evidence/evidence_requirement.py
@@ -10,6 +10,7 @@ def __init__(
         self._evidence_type: str = data.get("evidence_type")
         self._label: dict = data.get("label")
         self._sensitive_features: List[str] = data.get("sensitive_features", [])
+        self._tags: dict = data.get("tags")
 
     def __str__(self) -> str:
         return f"{self.evidence_type}-EvidenceRequirement.label-{self.label}"
@@ -21,3 +22,7 @@ def evidence_type(self):
     @property
     def label(self):
         return self._label
+
+    @property
+    def tags(self):
+        return self._tags
diff --git a/credoai/governance/governance.py b/credoai/governance/governance.py
index 23de7c84..cc53814a 100644
--- a/credoai/governance/governance.py
+++ b/credoai/governance/governance.py
@@ -157,10 +157,22 @@ def register(
                 )
             )
 
+        # Extract unique tags
+        self._unique_tags: List[dict] = []
+        for x in self._evidence_requirements:
+            if x.tags not in self._unique_tags:
+                self._unique_tags.append(x.tags)
+        self._unique_tags = [x for x in self._unique_tags if x]
+
         global_logger.info(
            f"Successfully registered with {len(self._evidence_requirements)} evidence requirements"
         )
 
+        if self._unique_tags:
+            global_logger.info(
+                f"The following tags have been found in the evidence requirements: {self._unique_tags}"
+            )
+
         self.clear_evidence()
 
     def __parse_json_api(self, json_str):

From 650a86cefa05ed9fccedd979452917b9302b71e6 Mon Sep 17 00:00:00 2001
From: fabrizio-credo
Date: Thu, 27 Oct 2022 15:54:59 -0700
Subject: [PATCH 03/11] Modified lens to issue warning if tags are requested and not found

---
 credoai/lens/lens.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/credoai/lens/lens.py b/credoai/lens/lens.py
index b8590828..2bee75a8 100644
--- a/credoai/lens/lens.py
+++ b/credoai/lens/lens.py
@@ -11,8 +11,7 @@
 from credoai.evaluators.evaluator import Evaluator
 from credoai.governance import Governance
 from credoai.lens.pipeline_creator import PipelineCreator
-from credoai.utils import (ValidationError, check_subset, flatten_list,
-                           global_logger)
+from credoai.utils import ValidationError, check_subset, flatten_list, global_logger
 
 # Custom type
 Pipeline = List[Union[Evaluator, Tuple[Evaluator, str, dict]]]
@@ -416,6 +415,11 @@ def _validate(self):
                 "Sensitive features should have the same shape across assessment and training data"
             )
 
+        if self.model is not None and self.gov is not None:
+            if self.model.tags not in self.gov._unique_tags:
+                mes = f"Model tags: {self.model.tags} are not among the ones found in the governance object: {self.gov._unique_tags}"
+                self.logger.warning(mes)
+
     @staticmethod
     def _consume_pipeline_step(step):
         def safe_get(step, index):

From 
561a3aa0303d4d329a5fac760b4b14e96d15f7ef Mon Sep 17 00:00:00 2001 From: Ian Eisenberg Date: Thu, 27 Oct 2022 17:30:48 -0700 Subject: [PATCH 04/11] added models to governance export --- credoai/artifacts/model/base_model.py | 1 - credoai/governance/credo_api.py | 16 ++------- credoai/governance/governance.py | 51 ++++++++++++++++----------- credoai/lens/lens.py | 5 +-- 4 files changed, 36 insertions(+), 37 deletions(-) diff --git a/credoai/artifacts/model/base_model.py b/credoai/artifacts/model/base_model.py index ee1756e1..59adddf1 100644 --- a/credoai/artifacts/model/base_model.py +++ b/credoai/artifacts/model/base_model.py @@ -33,7 +33,6 @@ def __init__( model_like, tags: Optional[dict] = None, ): - self.type = type self.name = name self.model_like = model_like diff --git a/credoai/governance/credo_api.py b/credoai/governance/credo_api.py index a5cc0eb5..dfd45ae8 100644 --- a/credoai/governance/credo_api.py +++ b/credoai/governance/credo_api.py @@ -105,9 +105,7 @@ def get_assessment_plan(self, url: str): return self._client.get(url) - def create_assessment( - self, use_case_id: str, policy_pack_id: str, evidences: List[dict] - ): + def create_assessment(self, use_case_id: str, data: dict): """ Upload evidences to API server. @@ -115,10 +113,8 @@ def create_assessment( ---------- use_case_id : str use case id - policy_pack_id : str - policy pack id, ie: FAIR+1 - evidences: list[dict] - list of evidences + data : dict + assessment data generated by Governance Raises ------ @@ -127,10 +123,4 @@ def create_assessment( """ path = f"use_cases/{use_case_id}/assessments" - - data = { - "policy_pack_id": policy_pack_id, - "evidences": evidences, - "$type": "assessments", - } return self._client.post(path, data) diff --git a/credoai/governance/governance.py b/credoai/governance/governance.py index cc53814a..46763ce6 100644 --- a/credoai/governance/governance.py +++ b/credoai/governance/governance.py @@ -71,6 +71,7 @@ def __init__( self._policy_pack_id: Optional[str] = None self._evidence_requirements: List[EvidenceRequirement] = [] self._evidences: List[Evidence] = [] + self._model = None self._plan: Optional[dict] = None if credo_api_client: @@ -182,6 +183,15 @@ def __parse_json_api(self, json_str): def registered(self): return bool(self._plan) + def add_evidence(self, evidences: Union[Evidence, List[Evidence]]): + """ + Add evidences + """ + self._evidences += wrap_list(evidences) + + def clear_evidence(self): + self.set_evidence([]) + def get_evidence_requirements(self): """ Returns evidence requirements @@ -193,20 +203,15 @@ def get_evidence_requirements(self): """ return self._evidence_requirements - def clear_evidence(self): - self.set_evidence([]) - def set_evidence(self, evidences: List[Evidence]): """ Update evidences """ self._evidences = evidences - def add_evidence(self, evidences: Union[Evidence, List[Evidence]]): - """ - Add evidences - """ - self._evidences += wrap_list(evidences) + def set_model(self, model): + prepared_model = {"name": model.name, "tags": model.tags} + self._model = prepared_model def match_requirements(self): missing = [] @@ -235,12 +240,10 @@ def export(self, filename=None): return False to_return = self.match_requirements() - evidences = self._prepare_evidences() - if filename is None: - self._api_export(evidences) + self._api_export() else: - self._file_export(evidences, filename) + self._file_export(filename) if to_return: export_status = "All requirements were matched." 
@@ -250,11 +253,11 @@ def export(self, filename=None): global_logger.info(export_status) return to_return - def _api_export(self, evidences): + def _api_export(self): global_logger.info( - f"Uploading {len(evidences)} evidences.. for use_case_id={self._use_case_id} policy_pack_id={self._policy_pack_id}" + f"Uploading {len(self._evidences)} evidences.. for use_case_id={self._use_case_id} policy_pack_id={self._policy_pack_id}" ) - self._api.create_assessment(self._use_case_id, self._policy_pack_id, evidences) + self._api.create_assessment(self._use_case_id, self._prepare_export_data()) def _check_inclusion(self, label, evidence): matching_evidence = [] @@ -270,19 +273,25 @@ def _check_inclusion(self, label, evidence): return False return matching_evidence - def _file_export(self, evidences, filename): + def _file_export(self, filename): global_logger.info( - f"Saving {len(evidences)} evidences to {filename}.. for use_case_id={self._use_case_id} policy_pack_id={self._policy_pack_id} " + f"Saving {len(self._evidences)} evidences to {filename}.. for use_case_id={self._use_case_id} policy_pack_id={self._policy_pack_id} " ) + data = self._prepare_export_data() + meta = {"client": "Credo AI Lens", "version": __version__} + data = json_dumps(serialize(data=data, meta=meta)) + with open(filename, "w") as f: + f.write(data) + + def _prepare_export_data(self): + evidences = self._prepare_evidences() data = { "policy_pack_id": self._policy_pack_id, + "models": [self._model] if self._model else None, "evidences": evidences, "$type": "assessments", } - meta = {"client": "Credo AI Lens", "version": __version__} - data = json_dumps(serialize(data=data, meta=meta)) - with open(filename, "w") as f: - f.write(data) + return data def _prepare_evidences(self): evidences = list(map(lambda e: e.struct(), self._evidences)) diff --git a/credoai/lens/lens.py b/credoai/lens/lens.py index b8590828..9b841f6e 100644 --- a/credoai/lens/lens.py +++ b/credoai/lens/lens.py @@ -11,8 +11,7 @@ from credoai.evaluators.evaluator import Evaluator from credoai.governance import Governance from credoai.lens.pipeline_creator import PipelineCreator -from credoai.utils import (ValidationError, check_subset, flatten_list, - global_logger) +from credoai.utils import ValidationError, check_subset, flatten_list, global_logger # Custom type Pipeline = List[Union[Evaluator, Tuple[Evaluator, str, dict]]] @@ -184,6 +183,8 @@ def send_to_governance(self, overwrite_governance=False): """ evidence = self.get_evidence() if self.gov: + if self.model: + self.gov.set_model(self.model) if overwrite_governance: self.gov.set_evidence(evidence) self.logger.info( From 9be8cfba4a36c0e6061b12c00584af595d4af34c Mon Sep 17 00:00:00 2001 From: Ian Eisenberg Date: Fri, 28 Oct 2022 09:29:31 -0700 Subject: [PATCH 05/11] updated dataset names, added training/assessment to governance, updated tag functionality for reqs --- credoai/evaluators/evaluator.py | 6 +++--- credoai/governance/governance.py | 27 ++++++++++++++++++++++++--- credoai/lens/lens.py | 14 +++++++++++--- 3 files changed, 38 insertions(+), 9 deletions(-) diff --git a/credoai/evaluators/evaluator.py b/credoai/evaluators/evaluator.py index 194c2bad..0b94359e 100644 --- a/credoai/evaluators/evaluator.py +++ b/credoai/evaluators/evaluator.py @@ -115,9 +115,9 @@ def _get_artifacts(self): artifacts = {} save_keys = { "model": "model_name", - "data": "data_name", - "assessment_data": "assessment_data_name", - "training_data": "training_data_name", + "data": "dataset_name", + "assessment_data": 
"assessment_dataset_name", + "training_data": "training_dataset_name", } for k in self.artifact_keys: save_key = save_keys.get(k, k) diff --git a/credoai/governance/governance.py b/credoai/governance/governance.py index 46763ce6..7040f8f9 100644 --- a/credoai/governance/governance.py +++ b/credoai/governance/governance.py @@ -192,16 +192,32 @@ def add_evidence(self, evidences: Union[Evidence, List[Evidence]]): def clear_evidence(self): self.set_evidence([]) - def get_evidence_requirements(self): + def get_evidence_requirements(self, tags: dict = None): """ Returns evidence requirements + Parameters + ---------- + tags : dict, optional + Tags to subset evidence requirements Returns ------- List[EvidenceRequirement] """ - return self._evidence_requirements + if tags: + requirements = [ + e for e in self._evidence_requirements if (not e.tags or e.tags == tags) + ] + else: + requirements = self._evidence_requirements + return requirements + + def get_model_tags(self): + if self._model: + return self._model["tags"] + else: + return None def set_evidence(self, evidences: List[Evidence]): """ @@ -209,8 +225,13 @@ def set_evidence(self, evidences: List[Evidence]): """ self._evidences = evidences - def set_model(self, model): + def set_artifacts(self, model, training_dataset=None, assessment_dataset=None): + global_logger.info(f"Adding model ({model.name}) to governance.") prepared_model = {"name": model.name, "tags": model.tags} + if training_dataset: + prepared_model["training_dataset_name"] = training_dataset.name + if assessment_dataset: + prepared_model["assessment_dataset_name"] = assessment_dataset.name self._model = prepared_model def match_requirements(self): diff --git a/credoai/lens/lens.py b/credoai/lens/lens.py index 9b841f6e..d240430b 100644 --- a/credoai/lens/lens.py +++ b/credoai/lens/lens.py @@ -75,13 +75,14 @@ def __init__( self.assessment_data = assessment_data self.training_data = training_data self.assessment_plan: dict = {} - self.gov = governance + self.gov = None self.pipeline: list = [] self.logger = global_logger if self.assessment_data and self.assessment_data.sensitive_features is not None: self.sens_feat_names = list(self.assessment_data.sensitive_features) else: self.sens_feat_names = [] + self._add_governance(governance) self._generate_pipeline(pipeline) # Can pass pipeline directly self._validate() @@ -183,8 +184,6 @@ def send_to_governance(self, overwrite_governance=False): """ evidence = self.get_evidence() if self.gov: - if self.model: - self.gov.set_model(self.model) if overwrite_governance: self.gov.set_evidence(evidence) self.logger.info( @@ -324,6 +323,15 @@ def _add( logger_message += f"Sensitive feature: {metadata['sensitive_feature']}" self.logger.info(logger_message) + def _add_governance(self, governance: Governance = None): + if governance is None: + return + self.gov = governance + if self.model: + self.gov.set_artifacts( + self.model, self.training_dataset, self.assessment_data + ) + def _cycle_add_through_ds_feat( self, pipeline_step, From 2c138937d1919ba1a573990684b033dface016c5 Mon Sep 17 00:00:00 2001 From: Ian Eisenberg Date: Fri, 28 Oct 2022 09:40:49 -0700 Subject: [PATCH 06/11] tags working --- credoai/governance/governance.py | 25 +++++++++++++++---------- credoai/lens/lens.py | 4 +--- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/credoai/governance/governance.py b/credoai/governance/governance.py index 7040f8f9..b32c0b3c 100644 --- a/credoai/governance/governance.py +++ b/credoai/governance/governance.py @@ -194,24 +194,27 @@ 
def clear_evidence(self): def get_evidence_requirements(self, tags: dict = None): """ - Returns evidence requirements + Returns evidence requirements. Each evidence requirement can have optional tags + (a dictionary). Parameters ---------- tags : dict, optional - Tags to subset evidence requirements + Tags to subset evidence requirements. If a model has been set, will default + to the model's tags. Evidence requirements will be returned that have no + tags or have the same tag as provided. Returns ------- List[EvidenceRequirement] """ - if tags: - requirements = [ - e for e in self._evidence_requirements if (not e.tags or e.tags == tags) - ] - else: - requirements = self._evidence_requirements - return requirements + if tags is None: + tags = self.get_model_tags() + + reqs = [ + e for e in self._evidence_requirements if (not e.tags or e.tags == tags) + ] + return reqs def get_model_tags(self): if self._model: @@ -226,7 +229,9 @@ def set_evidence(self, evidences: List[Evidence]): self._evidences = evidences def set_artifacts(self, model, training_dataset=None, assessment_dataset=None): - global_logger.info(f"Adding model ({model.name}) to governance.") + global_logger.info( + f"Adding model ({model.name}) to governance. Model has tags: {model.tags}" + ) prepared_model = {"name": model.name, "tags": model.tags} if training_dataset: prepared_model["training_dataset_name"] = training_dataset.name diff --git a/credoai/lens/lens.py b/credoai/lens/lens.py index d240430b..95c41801 100644 --- a/credoai/lens/lens.py +++ b/credoai/lens/lens.py @@ -328,9 +328,7 @@ def _add_governance(self, governance: Governance = None): return self.gov = governance if self.model: - self.gov.set_artifacts( - self.model, self.training_dataset, self.assessment_data - ) + self.gov.set_artifacts(self.model, self.training_data, self.assessment_data) def _cycle_add_through_ds_feat( self, From 40514da46bc21a8124a18c96da67420b11b89d44 Mon Sep 17 00:00:00 2001 From: Ian Eisenberg Date: Fri, 28 Oct 2022 10:05:20 -0700 Subject: [PATCH 07/11] rearranged some functions, added a helper utility from governance --- credoai/governance/governance.py | 105 ++++++++++++++++++------------- credoai/lens/lens.py | 5 ++ 2 files changed, 68 insertions(+), 42 deletions(-) diff --git a/credoai/governance/governance.py b/credoai/governance/governance.py index b32c0b3c..1ce51ede 100644 --- a/credoai/governance/governance.py +++ b/credoai/governance/governance.py @@ -73,6 +73,7 @@ def __init__( self._evidences: List[Evidence] = [] self._model = None self._plan: Optional[dict] = None + self._unique_tags: List[dict] = [] if credo_api_client: client = credo_api_client @@ -159,7 +160,6 @@ def register( ) # Extract unique tags - self._unique_tags: List[dict] = [] for x in self._evidence_requirements: if x.tags not in self._unique_tags: self._unique_tags.append(x.tags) @@ -192,6 +192,34 @@ def add_evidence(self, evidences: Union[Evidence, List[Evidence]]): def clear_evidence(self): self.set_evidence([]) + def export(self, filename=None): + """ + Upload evidences to CredoAI Governance(Report) App + + Returns + ------- + True + When uploading is successful with all evidence + False + When it is not registered yet, or evidence is insufficient + """ + if not self._validate_export(): + return False + to_return = self._match_requirements() + + if filename is None: + self._api_export() + else: + self._file_export(filename) + + if to_return: + export_status = "All requirements were matched." + else: + export_status = "Partial match of requirements." 
+
+        global_logger.info(export_status)
+        return to_return
+
     def get_evidence_requirements(self, tags: dict = None):
         """
         Returns evidence requirements. Each evidence requirement can have optional tags
@@ -216,18 +244,17 @@ def get_evidence_requirements(self, tags: dict = None):
         ]
         return reqs
 
+    def get_evidence_tags(self):
+        """Return the unique tags used for all evidence requirements"""
+        return self._unique_tags
+
     def get_model_tags(self):
+        """Get the tags for the associated model"""
         if self._model:
             return self._model["tags"]
         else:
             return None
 
-    def set_evidence(self, evidences: List[Evidence]):
-        """
-        Update evidences
-        """
-        self._evidences = evidences
-
     def set_artifacts(self, model, training_dataset=None, assessment_dataset=None):
         global_logger.info(
             f"Adding model ({model.name}) to governance. Model has tags: {model.tags}"
         )
@@ -239,45 +266,27 @@ def set_artifacts(self, model, training_dataset=None, assessment_dataset=None):
             prepared_model["assessment_dataset_name"] = assessment_dataset.name
         self._model = prepared_model
 
-    def match_requirements(self):
-        missing = []
-        required_labels = [e.label for e in self.get_evidence_requirements()]
-        for label in required_labels:
-            matching_evidence = self._check_inclusion(label, self._evidences)
-            if not matching_evidence:
-                missing.append(label)
-                global_logger.info(f"Missing required evidence with label ({label}).")
-            else:
-                matching_evidence[0].label = label
-        return not bool(missing)
-
-    def export(self, filename=None):
+    def set_evidence(self, evidences: List[Evidence]):
         """
-        Upload evidences to CredoAI Governance(Report) App
-
-        Returns
-        -------
-        True
-            When uploading is successful with all evidence
-        False
-            When it is not registered yet, or evidence is insufficient
+        Update evidences
         """
-        if not self._validate_export():
-            return False
-        to_return = self.match_requirements()
-
-        if filename is None:
-            self._api_export()
-        else:
-            self._file_export(filename)
+        self._evidences = evidences
 
-        if to_return:
-            export_status = "All requirements were matched."
+    def tag_model(self, model):
+        tags = self.get_evidence_tags()
+        print(f"Select tag from assessment plan to associate with model:")
+        print("0: No tags")
+        for number, tag in enumerate(tags):
+            print(f"{number+1}: {tag}")
+        selection = int(input("Number of tag to associate: "))
+        if selection == 0:
+            selected_tag = None
         else:
-            export_status = "Partial match of requirements."
-
-        global_logger.info(export_status)
-        return to_return
+            selected_tag = tags[selection - 1]
+        print(f"Selected tag = {selected_tag}. 
Applying to model...") + model.tags = selected_tag + if self._model: + self._model["tags"] = selected_tag def _api_export(self): global_logger.info( @@ -309,6 +318,18 @@ def _file_export(self, filename): with open(filename, "w") as f: f.write(data) + def _match_requirements(self): + missing = [] + required_labels = [e.label for e in self.get_evidence_requirements()] + for label in required_labels: + matching_evidence = self._check_inclusion(label, self._evidences) + if not matching_evidence: + missing.append(label) + global_logger.info(f"Missing required evidence with label ({label}).") + else: + matching_evidence[0].label = label + return not bool(missing) + def _prepare_export_data(self): evidences = self._prepare_evidences() data = { diff --git a/credoai/lens/lens.py b/credoai/lens/lens.py index 95c41801..82367cda 100644 --- a/credoai/lens/lens.py +++ b/credoai/lens/lens.py @@ -380,6 +380,11 @@ def _generate_pipeline(self, pipeline): if self.gov: self.logger.info("Empty pipeline: generating from governance.") pipeline = PipelineCreator.generate_from_governance(self.gov) + if not pipeline: + self.logger.warning( + "No pipeline created from governance! Check that your" + " model is properly tagged. Try using Governance.tag_model" + ) else: return # Create pipeline from list of steps From eb8a2533eaf27d535430a8c2c18527ea90cc40a8 Mon Sep 17 00:00:00 2001 From: fabrizio-credo Date: Fri, 28 Oct 2022 10:55:11 -0700 Subject: [PATCH 08/11] Fixing function to find all evaluator subclasses --- credoai/evaluators/utils/utils.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/credoai/evaluators/utils/utils.py b/credoai/evaluators/utils/utils.py index 7e73b38b..60c913fb 100644 --- a/credoai/evaluators/utils/utils.py +++ b/credoai/evaluators/utils/utils.py @@ -3,9 +3,15 @@ def name2evaluator(evaluator_name): """Converts evaluator name to evaluator class""" - for eval in Evaluator.__subclasses__(): + for eval in all_subclasses(Evaluator): if evaluator_name == eval.__name__: return eval raise Exception( f"<{evaluator_name}> not found in list of Evaluators. Please confirm specified evaluator name is identical to Evaluator class definition." ) + + +def all_subclasses(cls): + return set(cls.__subclasses__()).union( + [s for c in cls.__subclasses__() for s in all_subclasses(c)] + ) From 2ff4ec04e174487847706fc607310244b2194b2b Mon Sep 17 00:00:00 2001 From: Ian Eisenberg Date: Fri, 28 Oct 2022 13:10:45 -0700 Subject: [PATCH 09/11] added docstrings --- credoai/governance/governance.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/credoai/governance/governance.py b/credoai/governance/governance.py index 1ce51ede..642f67e4 100644 --- a/credoai/governance/governance.py +++ b/credoai/governance/governance.py @@ -223,7 +223,8 @@ def export(self, filename=None): def get_evidence_requirements(self, tags: dict = None): """ Returns evidence requirements. Each evidence requirement can have optional tags - (a dictionary). + (a dictionary). Only returns requirements that have tags that match the model + (if provided), which are tags with the same tags as the model, or no tags. Parameters ---------- @@ -256,6 +257,7 @@ def get_model_tags(self): return None def set_artifacts(self, model, training_dataset=None, assessment_dataset=None): + """Sets up internal knowledge of model and datasets to send to Credo AI Platform""" global_logger.info( f"Adding model ({model.name}) to governance. 
Model has tags: {model.tags}"
         )
         prepared_model = {"name": model.name, "tags": model.tags}
@@ -273,6 +275,7 @@ def set_evidence(self, evidences: List[Evidence]):
         self._evidences = evidences
 
     def tag_model(self, model):
+        """Interactive utility to tag a model with tags from the assessment plan"""
         tags = self.get_evidence_tags()
         print(f"Select tag from assessment plan to associate with model:")
         print("0: No tags")

From 9980bb7b2985adbcb3dea2158fa06efb4d8baea7 Mon Sep 17 00:00:00 2001
From: fabrizio-credo
Date: Fri, 28 Oct 2022 13:34:28 -0700
Subject: [PATCH 10/11] Test fix

---
 tests/credoai/governance/test_credo_api.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tests/credoai/governance/test_credo_api.py b/tests/credoai/governance/test_credo_api.py
index 996f291b..a2eb480d 100644
--- a/tests/credoai/governance/test_credo_api.py
+++ b/tests/credoai/governance/test_credo_api.py
@@ -70,7 +70,6 @@ def test_create_assessment(self, api, client):
             "evidences": evidences,
             "$type": "assessments",
         }
-        api.create_assessment(use_case_id, policy_pack_id, evidences)
+        api.create_assessment(use_case_id, body)
 
         client.post.assert_called_with(f"use_cases/{use_case_id}/assessments", body)
-

From 67f8faf04d4ae39d36717df06be691962210cc7a Mon Sep 17 00:00:00 2001
From: Ian Eisenberg
Date: Fri, 28 Oct 2022 14:25:34 -0700
Subject: [PATCH 11/11] updated version

---
 credoai/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/credoai/__init__.py b/credoai/__init__.py
index d8120708..7a7f1ed0 100644
--- a/credoai/__init__.py
+++ b/credoai/__init__.py
@@ -2,4 +2,4 @@
 Primary interface for Credo AI Lens package
 """
 
-__version__ = "1.0.1"
+__version__ = "1.1.0"
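
Usage sketch (illustrative only): the snippet below shows how the tagging workflow introduced in this patch series might fit together. It is a minimal sketch, not part of the patches: the import paths are assumed from the package layout, `gov` is assumed to be a Governance object already registered against an assessment plan (so its evidence requirements and their tags are populated), and the model name and tag value are invented for the example.

from sklearn.linear_model import LogisticRegression

from credoai.artifacts import ClassificationModel

# Any estimator exposing `predict` works as model_like; a toy fit keeps this self-contained.
X = [[0.0], [1.0], [2.0], [3.0]]
y = [0, 0, 1, 1]
clf = LogisticRegression().fit(X, y)

# The new `tags` argument flows through to the base Model class, whose setter
# accepts a dict or None and raises ValidationError for anything else.
model = ClassificationModel(
    name="credit_default_classifier",  # hypothetical name
    model_like=clf,
    tags={"model_type": "binary_classification"},  # hypothetical tag
)

# With a registered Governance object `gov` (registration not shown here):
# gov.get_evidence_tags()                         # unique tags found in the plan's requirements
# gov.tag_model(model)                            # interactive helper to pick a tag for the model
# gov.get_evidence_requirements(tags=model.tags)  # matching requirements (untagged ones always included)
# gov.set_artifacts(model)                        # called automatically when Lens receives both model and gov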