From fc26b5de95784bbc54cdf1ef2831a6a715911a62 Mon Sep 17 00:00:00 2001
From: Anindyadeep
Date: Tue, 5 Mar 2024 11:06:15 +0530
Subject: [PATCH] fix _load_model

---
 deepeval/experimental/harness/models.py | 46 ++++++++++++++++++-------
 1 file changed, 33 insertions(+), 13 deletions(-)

diff --git a/deepeval/experimental/harness/models.py b/deepeval/experimental/harness/models.py
index 9e741cdfe..2f1c6b753 100644
--- a/deepeval/experimental/harness/models.py
+++ b/deepeval/experimental/harness/models.py
@@ -1,15 +1,26 @@
-from typing import Optional, Union, List 
+from typing import Optional, Union, List
 
 from deepeval.models.base_model import DeepEvalBaseModel
-from deepeval.experimental.harness.config import GeneralConfig, APIEndpointConfig
+from deepeval.experimental.harness.config import (
+    GeneralConfig,
+    APIEndpointConfig,
+)
 
 
 class DeepEvalHarnessModel(DeepEvalBaseModel):
-    def __init__(self, model_name_or_path: str, model_backend: str, **kwargs) -> None:
-        self.model_name_or_path, self.model_backend = model_name_or_path, model_backend
+    def __init__(
+        self, model_name_or_path: str, model_backend: str, **kwargs
+    ) -> None:
+        self.model_name_or_path, self.model_backend = (
+            model_name_or_path,
+            model_backend,
+        )
         self.additional_params = kwargs
-        super().__init__(model_name=model_name_or_path, **self.additional_params)
+        super().__init__(
+            model_name=model_name_or_path, **self.additional_params
+        )
 
+    def load_model(self, *args, **kwargs):
         try:
             from easy_eval import HarnessEvaluator
         except ImportError as error:
@@ -18,17 +29,26 @@ def __init__(self, model_name_or_path: str, model_backend: str, **kwargs) -> Non
                 "easy_eval is not found."
                 "You can install it using: pip install easy-evaluator"
             )
+
         self.evaluator = HarnessEvaluator(
-            model_name_or_path=self.model_name_or_path, model_backend=self.model_backend, **self.additional_params
+            model_name_or_path=self.model_name_or_path,
+            model_backend=self.model_backend,
+            **self.additional_params,
         )
-
-    def load_model(self, *args, **kwargs):
-        return self.evaluator.llm
-
-    def _call(self, tasks: List[str], config: Optional[Union[GeneralConfig, APIEndpointConfig]] = None):
-        # TODO: Anthropic is not supported in APIEndpointConfig.
+        return self.evaluator.llm
+
+    def _call(
+        self,
+        tasks: List[str],
+        config: Optional[Union[GeneralConfig, APIEndpointConfig]] = None,
+    ):
+        # TODO: Anthropic is not supported in APIEndpointConfig.
         if config is None:
-            self.config = APIEndpointConfig() if self.model_name_or_path == "openai" else GeneralConfig()
+            self.config = (
+                APIEndpointConfig()
+                if self.model_name_or_path == "openai"
+                else GeneralConfig()
+            )
         else:
             self.config = config
         return self.evaluator.evaluate(tasks=tasks, config=self.config)
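
For reviewers, a minimal usage sketch of the patched class, assuming easy_eval is installed (pip install easy-evaluator). The "gpt2" checkpoint, "huggingface" backend, and "hellaswag" task below are illustrative placeholders, not values taken from this patch:

    # Hypothetical example; the model, backend, and task names are placeholders.
    from deepeval.experimental.harness.models import DeepEvalHarnessModel
    from deepeval.experimental.harness.config import GeneralConfig

    model = DeepEvalHarnessModel(
        model_name_or_path="gpt2",    # placeholder checkpoint
        model_backend="huggingface",  # placeholder backend name
    )

    # With this patch, load_model() builds the HarnessEvaluator and returns the
    # underlying LLM; _call() then runs the requested harness tasks with either
    # a GeneralConfig or an APIEndpointConfig.
    llm = model.load_model()
    results = model._call(tasks=["hellaswag"], config=GeneralConfig())
    print(results)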