From 6fad5cb29f669b3ed7a9eacc1486e24c79021916 Mon Sep 17 00:00:00 2001 From: Jeffrey Ip Date: Tue, 9 Jan 2024 23:05:46 -0800 Subject: [PATCH 01/15] Add maximum score base metric --- deepeval/metrics/answer_relevancy.py | 1 + deepeval/metrics/base_metric.py | 26 +++++++++++---------- deepeval/metrics/contextual_precision.py | 1 + deepeval/metrics/contextual_recall.py | 1 + deepeval/metrics/contextual_relevancy.py | 1 + deepeval/metrics/faithfulness.py | 1 + deepeval/metrics/hallucination_metric.py | 3 ++- deepeval/metrics/judgemental_gpt.py | 3 ++- deepeval/metrics/llm_eval_metric.py | 3 ++- deepeval/metrics/non_toxic_metric.py | 3 ++- deepeval/metrics/summarization.py | 1 + deepeval/metrics/unbias_metric.py | 3 ++- deepeval/models/summac_model.py | 18 +++++++-------- deepeval/test_case.py | 4 ++++ deepeval/test_run/test_run.py | 2 +- tests/test_custom_execution_time.py | 29 ++++++++++++++++++++++++ 16 files changed, 73 insertions(+), 27 deletions(-) create mode 100644 tests/test_custom_execution_time.py diff --git a/deepeval/metrics/answer_relevancy.py b/deepeval/metrics/answer_relevancy.py index a4cdadbb2..2d26e8edb 100644 --- a/deepeval/metrics/answer_relevancy.py +++ b/deepeval/metrics/answer_relevancy.py @@ -115,6 +115,7 @@ def _generate_key_points( return data["key_points"] def is_successful(self) -> bool: + self.success = self.score >= self.minimum_score return self.success @property diff --git a/deepeval/metrics/base_metric.py b/deepeval/metrics/base_metric.py index ce2cdb866..8a541d842 100644 --- a/deepeval/metrics/base_metric.py +++ b/deepeval/metrics/base_metric.py @@ -9,6 +9,8 @@ class BaseMetric: score: float = 0 score_metadata: Dict = None reason: Optional[str] = None + # max_execution_time: Optional[float] = None + # max_cost: Optional[float] = None @property def minimum_score(self) -> float: @@ -20,18 +22,18 @@ def minimum_score(self, value: float): # Measure function signature is subject to be different - not sure # how applicable this is - might need a better abstraction - @abstractmethod - def measure(self, test_case: LLMTestCase, *args, **kwargs) -> float: - raise NotImplementedError - - def _get_init_values(self): - # We use this method for sending useful metadata - init_values = { - param: getattr(self, param) - for param in vars(self) - if isinstance(getattr(self, param), (str, int, float)) - } - return init_values + # @abstractmethod + # def measure(self, test_case: LLMTestCase, *args, **kwargs) -> float: + # raise NotImplementedError + + # def _get_init_values(self): + # # We use this method for sending useful metadata + # init_values = { + # param: getattr(self, param) + # for param in vars(self) + # if isinstance(getattr(self, param), (str, int, float)) + # } + # return init_values @abstractmethod def is_successful(self) -> bool: diff --git a/deepeval/metrics/contextual_precision.py b/deepeval/metrics/contextual_precision.py index 8c7f0b9d6..d7d8e4b7b 100644 --- a/deepeval/metrics/contextual_precision.py +++ b/deepeval/metrics/contextual_precision.py @@ -136,6 +136,7 @@ def _generate_verdicts( return verdicts def is_successful(self) -> bool: + self.success = self.score >= self.minimum_score return self.success @property diff --git a/deepeval/metrics/contextual_recall.py b/deepeval/metrics/contextual_recall.py index 347a97463..bc2fa99b0 100644 --- a/deepeval/metrics/contextual_recall.py +++ b/deepeval/metrics/contextual_recall.py @@ -102,6 +102,7 @@ def _generate_verdicts( return verdicts def is_successful(self) -> bool: + self.success = self.score >= 
self.minimum_score return self.success @property diff --git a/deepeval/metrics/contextual_relevancy.py b/deepeval/metrics/contextual_relevancy.py index 556d0fe5b..b5860c4a0 100644 --- a/deepeval/metrics/contextual_relevancy.py +++ b/deepeval/metrics/contextual_relevancy.py @@ -131,6 +131,7 @@ def _generate_verdicts_list( return verdicts_list def is_successful(self) -> bool: + self.success = self.score >= self.minimum_score return self.success @property diff --git a/deepeval/metrics/faithfulness.py b/deepeval/metrics/faithfulness.py index 74a5ddc45..8e1f96feb 100644 --- a/deepeval/metrics/faithfulness.py +++ b/deepeval/metrics/faithfulness.py @@ -172,6 +172,7 @@ def _generate_verdicts_list( return verdicts_list def is_successful(self) -> bool: + self.success = self.score >= self.minimum_score return self.success @property diff --git a/deepeval/metrics/hallucination_metric.py b/deepeval/metrics/hallucination_metric.py index 3f63c6910..67810415a 100644 --- a/deepeval/metrics/hallucination_metric.py +++ b/deepeval/metrics/hallucination_metric.py @@ -25,11 +25,12 @@ def measure(self, test_case: LLMTestCase): if score > max_score: max_score = score - self.success = max_score > self.minimum_score + self.success = max_score >= self.minimum_score self.score = max_score return max_score def is_successful(self) -> bool: + self.success = self.score >= self.minimum_score return self.success @property diff --git a/deepeval/metrics/judgemental_gpt.py b/deepeval/metrics/judgemental_gpt.py index 30f44836f..5054b8b8c 100644 --- a/deepeval/metrics/judgemental_gpt.py +++ b/deepeval/metrics/judgemental_gpt.py @@ -74,5 +74,6 @@ def measure(self, test_case: LLMTestCase): return self.score - def is_successful(self): + def is_successful(self) -> bool: + self.success = self.score >= self.minimum_score return self.success diff --git a/deepeval/metrics/llm_eval_metric.py b/deepeval/metrics/llm_eval_metric.py index 4d08674f5..461823912 100644 --- a/deepeval/metrics/llm_eval_metric.py +++ b/deepeval/metrics/llm_eval_metric.py @@ -76,7 +76,8 @@ def measure(self, test_case: LLMTestCase): self.success = score >= self.minimum_score return self.score - def is_successful(self): + def is_successful(self) -> bool: + self.success = self.score >= self.minimum_score return self.success def generate_evaluation_steps(self): diff --git a/deepeval/metrics/non_toxic_metric.py b/deepeval/metrics/non_toxic_metric.py index 96318bef2..1b41aa414 100644 --- a/deepeval/metrics/non_toxic_metric.py +++ b/deepeval/metrics/non_toxic_metric.py @@ -62,7 +62,8 @@ def measure(self, test_case: LLMTestCase): self.score = average_score return self.score - def is_successful(self): + def is_successful(self) -> bool: + self.success = self.score >= self.minimum_score return self.success @property diff --git a/deepeval/metrics/summarization.py b/deepeval/metrics/summarization.py index 9a005e6fd..d3ee73101 100644 --- a/deepeval/metrics/summarization.py +++ b/deepeval/metrics/summarization.py @@ -134,6 +134,7 @@ def get_answer(self, question: str, text: str) -> str: return res.content def is_successful(self) -> bool: + self.success = self.score >= self.minimum_score return self.success @property diff --git a/deepeval/metrics/unbias_metric.py b/deepeval/metrics/unbias_metric.py index 945d07c67..9511edbad 100644 --- a/deepeval/metrics/unbias_metric.py +++ b/deepeval/metrics/unbias_metric.py @@ -69,7 +69,8 @@ def measure(self, test_case: LLMTestCase, return_all_scores: bool = False): return average_score - def is_successful(self): + def is_successful(self) -> 
bool: + self.success = self.score >= self.minimum_score return self.success @property diff --git a/deepeval/models/summac_model.py b/deepeval/models/summac_model.py index 7df978794..daa6ab258 100644 --- a/deepeval/models/summac_model.py +++ b/deepeval/models/summac_model.py @@ -1,5 +1,5 @@ import torch -from typing import Union, List +from typing import Union, List, Optional from typing import List, Union, get_origin from deepeval.models.base import DeepEvalBaseModel from deepeval.models._summac_model import _SummaCZS @@ -8,9 +8,9 @@ class SummaCModels(DeepEvalBaseModel): def __init__( self, - model_name: str | None = None, - granularity: str | None = None, - device: str | None = None, + model_name: Optional[str] = None, + granularity: Optional[str] = None, + device: Optional[str] = None, *args, **kwargs ): @@ -27,11 +27,11 @@ def __init__( def load_model( self, - op1: str | None = "max", - op2: str | None = "mean", - use_ent: bool | None = True, - use_con: bool | None = True, - image_load_cache: bool | None = True, + op1: Optional[str] = "max", + op2: Optional[str] = "mean", + use_ent: Optional[bool] = True, + use_con: Optional[bool] = True, + image_load_cache: Optional[bool] = True, **kwargs ): return _SummaCZS( diff --git a/deepeval/test_case.py b/deepeval/test_case.py index ca3b2757d..48e17647c 100644 --- a/deepeval/test_case.py +++ b/deepeval/test_case.py @@ -20,6 +20,8 @@ def __init__( expected_output: Optional[str] = None, context: Optional[List[str]] = None, retrieval_context: Optional[List[str]] = None, + execution_time: Optional[float] = None, + cost: Optional[float] = None, id: Optional[str] = None, ): self.id = id @@ -28,3 +30,5 @@ def __init__( self.expected_output = expected_output self.context = context self.retrieval_context = retrieval_context + self.execution_time = execution_time + self.cost = cost diff --git a/deepeval/test_run/test_run.py b/deepeval/test_run/test_run.py index 7ef742632..3fe5a0092 100644 --- a/deepeval/test_run/test_run.py +++ b/deepeval/test_run/test_run.py @@ -100,7 +100,7 @@ def add_llm_test_case( existing_test_case.metrics_metadata.append(metrics_metadata) success = all( [ - metric.score >= metric.minimum_score + metric.is_successful() for metric in existing_test_case.metrics_metadata ] ) diff --git a/tests/test_custom_execution_time.py b/tests/test_custom_execution_time.py new file mode 100644 index 000000000..4a9d50da2 --- /dev/null +++ b/tests/test_custom_execution_time.py @@ -0,0 +1,29 @@ +# from deepeval.metrics import BaseMetric +# from deepeval.test_case import LLMTestCase +# from deepeval import assert_test + +# class ExecutionTimeMetric(BaseMetric): +# def __init__(self, max_execution_time: float): +# self.max_execution_time = max_execution_time + +# def measure(self, test_case: LLMTestCase): +# self.success = test_case.execution_time <= self.max_execution_time +# if self.success: +# self.score = 1 +# else: +# self.score = 0 + +# return self.score + +# def is_successful(self): +# return self.success + +# @property +# def name(self): +# return "Execution Time" + + +# def test_execution_time(): +# test_case = LLMTestCase(input="...", actual_output="...", execution_time=4.57) +# execution_time_metric = ExecutionTimeMetric(max_execution_time=5) +# assert_test(test_case, [execution_time_metric]) \ No newline at end of file From f0d6d3eefc024094746eea0e90e8f949d6e801b8 Mon Sep 17 00:00:00 2001 From: Jeffrey Ip Date: Tue, 9 Jan 2024 23:42:26 -0800 Subject: [PATCH 02/15] reformat --- deepeval/metrics/base_metric.py | 20 +++----------------- 
tests/test_custom_execution_time.py | 8 ++++---- 2 files changed, 7 insertions(+), 21 deletions(-) diff --git a/deepeval/metrics/base_metric.py b/deepeval/metrics/base_metric.py index 8a541d842..24aa8abe6 100644 --- a/deepeval/metrics/base_metric.py +++ b/deepeval/metrics/base_metric.py @@ -5,12 +5,9 @@ class BaseMetric: - # set an arbitrary minimum score that will get over-ridden later score: float = 0 score_metadata: Dict = None reason: Optional[str] = None - # max_execution_time: Optional[float] = None - # max_cost: Optional[float] = None @property def minimum_score(self) -> float: @@ -20,20 +17,9 @@ def minimum_score(self) -> float: def minimum_score(self, value: float): self._minimum_score = value - # Measure function signature is subject to be different - not sure - # how applicable this is - might need a better abstraction - # @abstractmethod - # def measure(self, test_case: LLMTestCase, *args, **kwargs) -> float: - # raise NotImplementedError - - # def _get_init_values(self): - # # We use this method for sending useful metadata - # init_values = { - # param: getattr(self, param) - # for param in vars(self) - # if isinstance(getattr(self, param), (str, int, float)) - # } - # return init_values + @abstractmethod + def measure(self, test_case: LLMTestCase, *args, **kwargs) -> float: + raise NotImplementedError @abstractmethod def is_successful(self) -> bool: diff --git a/tests/test_custom_execution_time.py b/tests/test_custom_execution_time.py index 4a9d50da2..488432cad 100644 --- a/tests/test_custom_execution_time.py +++ b/tests/test_custom_execution_time.py @@ -5,7 +5,7 @@ # class ExecutionTimeMetric(BaseMetric): # def __init__(self, max_execution_time: float): # self.max_execution_time = max_execution_time - + # def measure(self, test_case: LLMTestCase): # self.success = test_case.execution_time <= self.max_execution_time # if self.success: @@ -14,16 +14,16 @@ # self.score = 0 # return self.score - + # def is_successful(self): # return self.success # @property # def name(self): # return "Execution Time" - + # def test_execution_time(): # test_case = LLMTestCase(input="...", actual_output="...", execution_time=4.57) # execution_time_metric = ExecutionTimeMetric(max_execution_time=5) -# assert_test(test_case, [execution_time_metric]) \ No newline at end of file +# assert_test(test_case, [execution_time_metric]) From 28d1072cbc2ef0a114059d0dcff1c8998630ffb3 Mon Sep 17 00:00:00 2001 From: Jeffrey Ip Date: Wed, 10 Jan 2024 11:19:41 -0800 Subject: [PATCH 03/15] Fix langchain azure --- tests/test_faithfulness.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_faithfulness.py b/tests/test_faithfulness.py index 7b3a5c27c..d3595be69 100644 --- a/tests/test_faithfulness.py +++ b/tests/test_faithfulness.py @@ -38,7 +38,7 @@ """ -@pytest.mark.skip(reason="openai is expensive") +# @pytest.mark.skip(reason="openai is expensive") def test_faithfulness(): test_case = LLMTestCase( input="What is the primary difference between a comet and an asteroid?", From af29c18cee1d3bec48343ed620cff9df8e9ab314 Mon Sep 17 00:00:00 2001 From: Jeffrey Ip Date: Thu, 11 Jan 2024 14:17:35 -0800 Subject: [PATCH 04/15] Migrate minimum score to threshold --- .DS_Store | Bin 8196 -> 6148 bytes README.md | 12 ++-- deepeval/evaluate.py | 6 +- deepeval/metrics/__init__.py | 1 + deepeval/metrics/answer_relevancy.py | 8 +-- deepeval/metrics/base_metric.py | 10 +-- deepeval/metrics/contextual_precision.py | 8 +-- deepeval/metrics/contextual_recall.py | 8 +-- deepeval/metrics/contextual_relevancy.py 
| 8 +-- deepeval/metrics/faithfulness.py | 8 +-- deepeval/metrics/hallucination_metric.py | 8 +-- deepeval/metrics/judgemental_gpt.py | 8 +-- deepeval/metrics/llm_eval_metric.py | 8 +-- deepeval/metrics/non_toxic_metric.py | 8 +-- deepeval/metrics/ragas_metric.py | 66 +++++++++---------- deepeval/metrics/summarization.py | 8 +-- deepeval/metrics/unbias_metric.py | 10 +-- deepeval/test_run/api.py | 2 +- deepeval/test_run/test_run.py | 21 +++--- docs/docs/confident-ai-debug-evaluations.mdx | 2 +- docs/docs/confident-ai-evaluate-datasets.mdx | 4 +- docs/docs/evaluation-datasets.mdx | 8 +-- docs/docs/evaluation-test-cases.mdx | 8 +-- docs/docs/getting-started.mdx | 18 ++--- docs/docs/metrics-answer-relevancy.mdx | 2 +- docs/docs/metrics-bias.mdx | 2 +- docs/docs/metrics-contextual-precision.mdx | 2 +- docs/docs/metrics-contextual-recall.mdx | 2 +- docs/docs/metrics-contextual-relevancy.mdx | 2 +- docs/docs/metrics-custom.mdx | 6 +- docs/docs/metrics-faithfulness.mdx | 2 +- docs/docs/metrics-hallucination.mdx | 2 +- docs/docs/metrics-introduction.mdx | 8 +-- docs/docs/metrics-judgemental.mdx | 4 +- docs/docs/metrics-llm-evals.mdx | 4 +- docs/docs/metrics-ragas.mdx | 2 +- docs/docs/metrics-summarization.mdx | 4 +- docs/docs/metrics-toxicity.mdx | 2 +- examples/getting_started/test_example.py | 12 ++-- examples/tracing/test_chatbot.py | 2 +- tests/test_answer_relevancy.py | 2 +- tests/test_contextual_precision.py | 2 +- tests/test_custom_metric.py | 6 +- tests/test_dataset.py | 2 +- tests/test_hallucination_metric.py | 6 +- tests/test_judgemental.py | 2 +- tests/test_llm_metric.py | 4 +- tests/test_quickstart.py | 4 +- tests/test_ragas.py | 48 +++++++------- 49 files changed, 191 insertions(+), 191 deletions(-) diff --git a/.DS_Store b/.DS_Store index 03d385f228cccda13efc3ef9b54b507c402f9232..b4ec85712a36e2cb9a2c5164239c2a4239bd311c 100644 GIT binary patch (binary delta data omitted; the diffs for README.md, deepeval/evaluate.py, and deepeval/metrics/__init__.py are truncated here) diff --git a/deepeval/metrics/answer_relevancy.py b/deepeval/metrics/answer_relevancy.py @@ def measure(self, test_case: LLMTestCase) -> float: self.reason = self._generate_reason( test_case.input, test_case.actual_output, answer_relevancy_score ) - self.success = answer_relevancy_score >= self.minimum_score + self.success = answer_relevancy_score >= self.threshold self.score = answer_relevancy_score return self.score @@ -115,7 +115,7 @@ def _generate_key_points( return data["key_points"] def is_successful(self) -> bool: - self.success = self.score >= self.minimum_score + self.success = self.score >= self.threshold return self.success @property diff --git a/deepeval/metrics/base_metric.py b/deepeval/metrics/base_metric.py index 24aa8abe6..f73a60143 100644 --- a/deepeval/metrics/base_metric.py +++ b/deepeval/metrics/base_metric.py @@ -10,12 +10,12 @@ class BaseMetric: reason: Optional[str] = None @property - def minimum_score(self) -> float: - return self._minimum_score + def threshold(self) -> float: + return self._threshold - @minimum_score.setter - def minimum_score(self, value: float): - self._minimum_score = value + @threshold.setter + def threshold(self, value: float): + self._threshold = value @abstractmethod def measure(self, test_case: LLMTestCase, *args, **kwargs) -> float: diff --git a/deepeval/metrics/contextual_precision.py b/deepeval/metrics/contextual_precision.py index d7d8e4b7b..5591ef50d 100644 --- a/deepeval/metrics/contextual_precision.py +++ b/deepeval/metrics/contextual_precision.py @@ -19,11 +19,11 @@ class ContextualPrecisionVerdict(BaseModel): class ContextualPrecisionMetric(BaseMetric): def __init__( self, - minimum_score: float = 0.5, + threshold: float = 0.5, model: Optional[str] = None, include_reason: bool = True, ): - self.minimum_score = minimum_score + self.threshold = threshold self.include_reason = include_reason self.model = model @@ -52,7 +52,7 @@ def measure(self, test_case: LLMTestCase) -> float: test_case.input, contextual_precision_score ) - self.success = contextual_precision_score >= self.minimum_score + self.success = contextual_precision_score >= self.threshold self.score = contextual_precision_score return self.score @@ -136,7 +136,7 @@ def _generate_verdicts( return verdicts def is_successful(self) -> bool: - self.success = self.score >= self.minimum_score + self.success = self.score >= self.threshold return self.success @property diff --git a/deepeval/metrics/contextual_recall.py b/deepeval/metrics/contextual_recall.py index bc2fa99b0..6cf2aeec5 100644 --- a/deepeval/metrics/contextual_recall.py +++ b/deepeval/metrics/contextual_recall.py @@ -18,11 +18,11 @@ class ContextualRecallVerdict(BaseModel): class ContextualRecallMetric(BaseMetric): def __init__( self, - minimum_score: float = 0.5, + threshold: float = 0.5, model: Optional[str] = None, include_reason: bool = True, ): - self.minimum_score = minimum_score + self.threshold = threshold self.model = model self.include_reason = include_reason self.n = 5 @@ -50,7 +50,7 @@ def measure(self, test_case: LLMTestCase) -> float: test_case.expected_output, contextual_recall_score ) - self.success = contextual_recall_score >= self.minimum_score + self.success = contextual_recall_score >= self.threshold self.score = contextual_recall_score return self.score @@ -102,7 +102,7 @@ def _generate_verdicts( return verdicts def is_successful(self) -> bool: - self.success = self.score >= self.minimum_score + self.success = self.score >= self.threshold return self.success @property diff 
--git a/deepeval/metrics/contextual_relevancy.py b/deepeval/metrics/contextual_relevancy.py index b5860c4a0..aa717fea4 100644 --- a/deepeval/metrics/contextual_relevancy.py +++ b/deepeval/metrics/contextual_relevancy.py @@ -19,11 +19,11 @@ class ContextualRelevancyVerdict(BaseModel): class ContextualRelevancyMetric(BaseMetric): def __init__( self, - minimum_score: float = 0.5, + threshold: float = 0.5, model: Optional[str] = "gpt-4", include_reason: bool = True, ): - self.minimum_score = minimum_score + self.threshold = threshold self.model = model self.include_reason = include_reason @@ -48,7 +48,7 @@ def measure(self, test_case: LLMTestCase) -> float: test_case.input, contextual_recall_score ) - self.success = contextual_recall_score >= self.minimum_score + self.success = contextual_recall_score >= self.threshold self.score = contextual_recall_score return self.score @@ -131,7 +131,7 @@ def _generate_verdicts_list( return verdicts_list def is_successful(self) -> bool: - self.success = self.score >= self.minimum_score + self.success = self.score >= self.threshold return self.success @property diff --git a/deepeval/metrics/faithfulness.py b/deepeval/metrics/faithfulness.py index 8e1f96feb..85409fd5f 100644 --- a/deepeval/metrics/faithfulness.py +++ b/deepeval/metrics/faithfulness.py @@ -20,11 +20,11 @@ class FaithfulnessVerdict(BaseModel): class FaithfulnessMetric(BaseMetric): def __init__( self, - minimum_score: float = 0.5, + threshold: float = 0.5, model: Optional[str] = None, include_reason: bool = True, ): - self.minimum_score = minimum_score + self.threshold = threshold # Don't set self.chat_model when using threading self.model = model self.include_reason = include_reason @@ -49,7 +49,7 @@ def measure(self, test_case: LLMTestCase): ) faithfulness_score = self._generate_score() self.reason = self._generate_reason(faithfulness_score) - self.success = faithfulness_score >= self.minimum_score + self.success = faithfulness_score >= self.threshold self.score = faithfulness_score return self.score @@ -172,7 +172,7 @@ def _generate_verdicts_list( return verdicts_list def is_successful(self) -> bool: - self.success = self.score >= self.minimum_score + self.success = self.score >= self.threshold return self.success @property diff --git a/deepeval/metrics/hallucination_metric.py b/deepeval/metrics/hallucination_metric.py index 67810415a..b9eb3346c 100644 --- a/deepeval/metrics/hallucination_metric.py +++ b/deepeval/metrics/hallucination_metric.py @@ -7,9 +7,9 @@ class HallucinationMetric(BaseMetric, metaclass=Singleton): def __init__( self, - minimum_score: float = 0.5, + threshold: float = 0.5, ): - self.minimum_score = minimum_score + self.threshold = threshold def measure(self, test_case: LLMTestCase): if test_case.actual_output is None or test_case.context is None: @@ -25,12 +25,12 @@ def measure(self, test_case: LLMTestCase): if score > max_score: max_score = score - self.success = max_score >= self.minimum_score + self.success = max_score >= self.threshold self.score = max_score return max_score def is_successful(self) -> bool: - self.success = self.score >= self.minimum_score + self.success = self.score >= self.threshold return self.success @property diff --git a/deepeval/metrics/judgemental_gpt.py b/deepeval/metrics/judgemental_gpt.py index 5054b8b8c..d707adade 100644 --- a/deepeval/metrics/judgemental_gpt.py +++ b/deepeval/metrics/judgemental_gpt.py @@ -24,7 +24,7 @@ def __init__( criteria: str, evaluation_params: List[LLMTestCaseParams], language: Languages = Languages.ENGLISH, - 
minimum_score: float = 0.5, + threshold: float = 0.5, ): if not isinstance(language, Languages): raise TypeError("'language' must be an instance of Languages.") @@ -33,7 +33,7 @@ def __init__( self.name = name self.evaluation_params = evaluation_params self.language = language.value - self.minimum_score = minimum_score + self.threshold = threshold self.success = None self.reason = None @@ -70,10 +70,10 @@ def measure(self, test_case: LLMTestCase): ) self.reason = response.reason self.score = response.score / 10 - self.success = self.score >= self.minimum_score + self.success = self.score >= self.threshold return self.score def is_successful(self) -> bool: - self.success = self.score >= self.minimum_score + self.success = self.score >= self.threshold return self.success diff --git a/deepeval/metrics/llm_eval_metric.py b/deepeval/metrics/llm_eval_metric.py index 461823912..8726f9a79 100644 --- a/deepeval/metrics/llm_eval_metric.py +++ b/deepeval/metrics/llm_eval_metric.py @@ -26,7 +26,7 @@ def __init__( criteria: Optional[str] = None, evaluation_steps: Optional[List[str]] = None, model: Optional[str] = None, - minimum_score: float = 0.5, + threshold: float = 0.5, ): self.name = name self.evaluation_params = evaluation_params @@ -50,7 +50,7 @@ def __init__( self.criteria = criteria self.model = model self.evaluation_steps = evaluation_steps - self.minimum_score = minimum_score + self.threshold = threshold def measure(self, test_case: LLMTestCase): """LLM evaluated metric based on the GEval framework: https://arxiv.org/pdf/2303.16634.pdf""" @@ -73,11 +73,11 @@ def measure(self, test_case: LLMTestCase): score, reason = self.evaluate(test_case) self.reason = reason self.score = float(score) / 10 - self.success = score >= self.minimum_score + self.success = score >= self.threshold return self.score def is_successful(self) -> bool: - self.success = self.score >= self.minimum_score + self.success = self.score >= self.threshold return self.success def generate_evaluation_steps(self): diff --git a/deepeval/metrics/non_toxic_metric.py b/deepeval/metrics/non_toxic_metric.py index 1b41aa414..20c9f78f2 100644 --- a/deepeval/metrics/non_toxic_metric.py +++ b/deepeval/metrics/non_toxic_metric.py @@ -13,13 +13,13 @@ def __init__( self, evaluation_params: List[LLMTestCaseParams], model_name: str = "original", - minimum_score: float = 0.5, + threshold: float = 0.5, ): if not evaluation_params: raise ValueError("evaluation_params cannot be empty or None") self.evaluation_params = evaluation_params - self.minimum_score, self.model_name = minimum_score, model_name + self.threshold, self.model_name = threshold, model_name def __call__(self, test_case: LLMTestCase): score = self.measure(test_case.actual_output) @@ -58,12 +58,12 @@ def measure(self, test_case: LLMTestCase): average_score = sum(total_scores) / len(total_scores) # Check if the average score meets the minimum requirement - self.success = average_score >= self.minimum_score + self.success = average_score >= self.threshold self.score = average_score return self.score def is_successful(self) -> bool: - self.success = self.score >= self.minimum_score + self.success = self.score >= self.threshold return self.success @property diff --git a/deepeval/metrics/ragas_metric.py b/deepeval/metrics/ragas_metric.py index cf27ce146..a1e1e4309 100644 --- a/deepeval/metrics/ragas_metric.py +++ b/deepeval/metrics/ragas_metric.py @@ -17,10 +17,10 @@ class RAGASContextualPrecisionMetric(BaseMetric): def __init__( self, - minimum_score: float = 0.3, + threshold: float = 
0.3, model: Optional[str] = "gpt-3.5-turbo", ): - self.minimum_score = minimum_score + self.threshold = threshold self.model = model def measure(self, test_case: LLMTestCase): @@ -57,7 +57,7 @@ def measure(self, test_case: LLMTestCase): # Ragas only does dataset-level comparisons context_precision_score = scores["context_precision"] - self.success = context_precision_score >= self.minimum_score + self.success = context_precision_score >= self.threshold self.score = context_precision_score return self.score @@ -74,10 +74,10 @@ class RAGASContextualRelevancyMetric(BaseMetric): def __init__( self, - minimum_score: float = 0.3, + threshold: float = 0.3, model: Optional[str] = "gpt-3.5-turbo", ): - self.minimum_score = minimum_score + self.threshold = threshold self.model = model def measure(self, test_case: LLMTestCase): @@ -113,7 +113,7 @@ def measure(self, test_case: LLMTestCase): # Ragas only does dataset-level comparisons context_relevancy_score = scores["context_relevancy"] - self.success = context_relevancy_score >= self.minimum_score + self.success = context_relevancy_score >= self.threshold self.score = context_relevancy_score return self.score @@ -130,10 +130,10 @@ class RAGASAnswerRelevancyMetric(BaseMetric): def __init__( self, - minimum_score: float = 0.3, + threshold: float = 0.3, model: Optional[str] = "gpt-3.5-turbo", ): - self.minimum_score = minimum_score + self.threshold = threshold self.model = model def measure(self, test_case: LLMTestCase): @@ -165,7 +165,7 @@ def measure(self, test_case: LLMTestCase): dataset = Dataset.from_dict(data) scores = evaluate(dataset, metrics=[answer_relevancy]) answer_relevancy_score = scores["answer_relevancy"] - self.success = answer_relevancy_score >= self.minimum_score + self.success = answer_relevancy_score >= self.threshold self.score = answer_relevancy_score return self.score @@ -180,10 +180,10 @@ def __name__(self): class RAGASFaithfulnessMetric(BaseMetric): def __init__( self, - minimum_score: float = 0.3, + threshold: float = 0.3, model: Optional[str] = "gpt-3.5-turbo", ): - self.minimum_score = minimum_score + self.threshold = threshold self.model = model def measure(self, test_case: LLMTestCase): @@ -215,7 +215,7 @@ def measure(self, test_case: LLMTestCase): dataset = Dataset.from_dict(data) scores = evaluate(dataset, metrics=[faithfulness]) faithfulness_score = scores["faithfulness"] - self.success = faithfulness_score >= self.minimum_score + self.success = faithfulness_score >= self.threshold self.score = faithfulness_score return self.score @@ -232,10 +232,10 @@ class RAGASContextualRecallMetric(BaseMetric): def __init__( self, - minimum_score: float = 0.3, + threshold: float = 0.3, model: Optional[str] = "gpt-3.5-turbo", ): - self.minimum_score = minimum_score + self.threshold = threshold self.model = model def measure(self, test_case: LLMTestCase): @@ -267,7 +267,7 @@ def measure(self, test_case: LLMTestCase): dataset = Dataset.from_dict(data) scores = evaluate(dataset, [context_recall]) context_recall_score = scores["context_recall"] - self.success = context_recall_score >= self.minimum_score + self.success = context_recall_score >= self.threshold self.score = context_recall_score return self.score @@ -284,10 +284,10 @@ class RAGASHarmfulnessMetric(BaseMetric): def __init__( self, - minimum_score: float = 0.3, + threshold: float = 0.3, model: Optional[str] = "gpt-3.5-turbo", ): - self.minimum_score = minimum_score + self.threshold = threshold self.model = model def measure(self, test_case: LLMTestCase): @@ -320,7 +320,7 @@ def 
measure(self, test_case: LLMTestCase): dataset = Dataset.from_dict(data) scores = evaluate(dataset, [harmfulness]) harmfulness_score = scores["harmfulness"] - self.success = harmfulness_score >= self.minimum_score + self.success = harmfulness_score >= self.threshold self.score = harmfulness_score return self.score @@ -337,10 +337,10 @@ class RAGASCoherenceMetric(BaseMetric): def __init__( self, - minimum_score: float = 0.3, + threshold: float = 0.3, model: Optional[str] = "gpt-3.5-turbo", ): - self.minimum_score = minimum_score + self.threshold = threshold self.model = model def measure(self, test_case: LLMTestCase): @@ -371,7 +371,7 @@ def measure(self, test_case: LLMTestCase): dataset = Dataset.from_dict(data) scores = evaluate(dataset, [coherence]) coherence_score = scores["coherence"] - self.success = coherence_score >= self.minimum_score + self.success = coherence_score >= self.threshold self.score = coherence_score return self.score @@ -388,10 +388,10 @@ class RAGASMaliciousnessMetric(BaseMetric): def __init__( self, - minimum_score: float = 0.3, + threshold: float = 0.3, model: Optional[str] = "gpt-3.5-turbo", ): - self.minimum_score = minimum_score + self.threshold = threshold self.model = model def measure(self, test_case: LLMTestCase): @@ -423,7 +423,7 @@ def measure(self, test_case: LLMTestCase): dataset = Dataset.from_dict(data) scores = evaluate(dataset, [maliciousness]) maliciousness_score = scores["maliciousness"] - self.success = maliciousness_score >= self.minimum_score + self.success = maliciousness_score >= self.threshold self.score = maliciousness_score return self.score @@ -440,10 +440,10 @@ class RAGASCorrectnessMetric(BaseMetric): def __init__( self, - minimum_score: float = 0.3, + threshold: float = 0.3, model: Optional[str] = "gpt-3.5-turbo", ): - self.minimum_score = minimum_score + self.threshold = threshold self.model = model def measure(self, test_case: LLMTestCase): @@ -475,7 +475,7 @@ def measure(self, test_case: LLMTestCase): dataset = Dataset.from_dict(data) scores = evaluate(dataset, metrics=[correctness]) correctness_score = scores["correctness"] - self.success = correctness_score >= self.minimum_score + self.success = correctness_score >= self.threshold self.score = correctness_score return self.score @@ -492,10 +492,10 @@ class RAGASConcisenessMetric(BaseMetric): def __init__( self, - minimum_score: float = 0.3, + threshold: float = 0.3, model: Optional[str] = "gpt-3.5-turbo", ): - self.minimum_score = minimum_score + self.threshold = threshold self.model = model def measure(self, test_case: LLMTestCase): @@ -526,7 +526,7 @@ def measure(self, test_case: LLMTestCase): dataset = Dataset.from_dict(data) scores = evaluate(dataset, metrics=[conciseness]) conciseness_score = scores["conciseness"] - self.success = conciseness_score >= self.minimum_score + self.success = conciseness_score >= self.threshold self.score = conciseness_score return self.score @@ -543,10 +543,10 @@ class RagasMetric(BaseMetric): def __init__( self, - minimum_score: float = 0.3, + threshold: float = 0.3, model: Optional[str] = "gpt-3.5-turbo", ): - self.minimum_score = minimum_score + self.threshold = threshold self.model = model def measure(self, test_case: LLMTestCase): @@ -594,7 +594,7 @@ def measure(self, test_case: LLMTestCase): 1.0 / score for score in score_metadata.values() ) - self.success = ragas_score >= self.minimum_score + self.success = ragas_score >= self.threshold self.score = ragas_score self.score_metadata = score_metadata return self.score diff --git 
a/deepeval/metrics/summarization.py b/deepeval/metrics/summarization.py index d3ee73101..c5c09ed1d 100644 --- a/deepeval/metrics/summarization.py +++ b/deepeval/metrics/summarization.py @@ -21,12 +21,12 @@ class ScoreType(Enum): class SummarizationMetric(BaseMetric): def __init__( self, - minimum_score: float = 0.5, + threshold: float = 0.5, model: Optional[str] = None, n: Optional[int] = 5, assessment_questions: Optional[List[str]] = None, ): - self.minimum_score = minimum_score + self.threshold = threshold self.model = model self.assessment_questions = assessment_questions self.n = n @@ -54,7 +54,7 @@ def measure(self, test_case: LLMTestCase): summarization_score = min(alignment_score, inclusion_score) - self.success = summarization_score >= self.minimum_score + self.success = summarization_score >= self.threshold self.score_metadata = { "Alignment": alignment_score, "Inclusion": inclusion_score, @@ -134,7 +134,7 @@ def get_answer(self, question: str, text: str) -> str: return res.content def is_successful(self) -> bool: - self.success = self.score >= self.minimum_score + self.success = self.score >= self.threshold return self.success @property diff --git a/deepeval/metrics/unbias_metric.py b/deepeval/metrics/unbias_metric.py index 9511edbad..fb137caab 100644 --- a/deepeval/metrics/unbias_metric.py +++ b/deepeval/metrics/unbias_metric.py @@ -15,18 +15,18 @@ def __init__( self, evaluation_params: List[LLMTestCaseParams], model_name: str = "original", - minimum_score: float = 0.5, + threshold: float = 0.5, ): # see paper for rationale https://arxiv.org/pdf/2208.05777.pdf if not evaluation_params: raise ValueError("evaluation_params cannot be empty or None") self.evaluation_params = evaluation_params self.model_name = model_name - self.minimum_score = minimum_score + self.threshold = threshold def __call__(self, output, expected_output, query: Optional[str] = "-"): score = self.measure(output, expected_output) - success = score >= self.minimum_score + success = score >= self.threshold return score def measure(self, test_case: LLMTestCase, return_all_scores: bool = False): @@ -61,7 +61,7 @@ def measure(self, test_case: LLMTestCase, return_all_scores: bool = False): # Calculate the average score average_score = total_score / len(self.evaluation_params) - self.success = average_score > self.minimum_score + self.success = average_score > self.threshold self.score = average_score if return_all_scores: @@ -70,7 +70,7 @@ def measure(self, test_case: LLMTestCase, return_all_scores: bool = False): return average_score def is_successful(self) -> bool: - self.success = self.score >= self.minimum_score + self.success = self.score >= self.threshold return self.success @property diff --git a/deepeval/test_run/api.py b/deepeval/test_run/api.py index 3db64aec3..3a3f37b5b 100644 --- a/deepeval/test_run/api.py +++ b/deepeval/test_run/api.py @@ -5,7 +5,7 @@ class MetricsMetadata(BaseModel): metric: str score: float - minimum_score: float = Field(None, alias="minimumScore") + threshold: float reason: Optional[str] = None diff --git a/deepeval/test_run/test_run.py b/deepeval/test_run/test_run.py index 3fe5a0092..3a1c41ffa 100644 --- a/deepeval/test_run/test_run.py +++ b/deepeval/test_run/test_run.py @@ -84,26 +84,25 @@ def add_llm_test_case( ): # Check if test case with the same ID already exists test_case_id = id(test_case) - existing_test_case: LLMTestCase = self.dict_test_cases.get( + existing_test_case: APITestCase = self.dict_test_cases.get( test_case_id, None ) metrics_metadata = MetricsMetadata( 
metric=metric.__name__, score=metric.score, - minimumScore=metric.minimum_score, + threshold=metric.threshold, reason=metric.reason, + # success=metric.is_successful() ) if existing_test_case: # If it exists, append the metrics to the existing test case existing_test_case.metrics_metadata.append(metrics_metadata) - success = all( - [ - metric.is_successful() - for metric in existing_test_case.metrics_metadata - ] - ) + if metric.is_successful() and existing_test_case.success == True: + success = True + else: + success = False # Update the success status existing_test_case.success = success else: @@ -215,7 +214,7 @@ def display_results_table(self, test_run: TestRun): test_case_name += f" ({test_case.id})" for metric_metadata in test_case.metrics_metadata: - if metric_metadata.score >= metric_metadata.minimum_score: + if metric_metadata.score >= metric_metadata.threshold: pass_count += 1 else: fail_count += 1 @@ -229,7 +228,7 @@ ( ) for metric_metadata in test_case.metrics_metadata: - if metric_metadata.score >= metric_metadata.minimum_score: + if metric_metadata.score >= metric_metadata.threshold: status = "[green]PASSED[/green]" else: status = "[red]FAILED[/red]" @@ -237,7 +236,7 @@ table.add_row( "", str(metric_metadata.metric), - f"{round(metric_metadata.score,2)} (threshold={metric_metadata.minimum_score}, reason={metric_metadata.reason})", + f"{round(metric_metadata.score,2)} (threshold={metric_metadata.threshold}, reason={metric_metadata.reason})", status, "", ) diff --git a/docs/docs/confident-ai-debug-evaluations.mdx b/docs/docs/confident-ai-debug-evaluations.mdx index c4aa15364..13867035c 100644 --- a/docs/docs/confident-ai-debug-evaluations.mdx +++ b/docs/docs/confident-ai-debug-evaluations.mdx @@ -152,7 +152,7 @@ def test_hallucination(): ] input = "What are the requirements to be president?" 
- metric = HallucinationMetric(minimum_score=0.8) + metric = HallucinationMetric(threshold=0.8) test_case = LLMTestCase( input=input, actual_output=chatbot.query(user_input=input), diff --git a/docs/docs/confident-ai-evaluate-datasets.mdx b/docs/docs/confident-ai-evaluate-datasets.mdx index b5b5b6b7a..712b6ffdf 100644 --- a/docs/docs/confident-ai-evaluate-datasets.mdx +++ b/docs/docs/confident-ai-evaluate-datasets.mdx @@ -111,7 +111,7 @@ dataset.pull(alias="My Confident Dataset") dataset, ) def test_customer_chatbot(test_case: LLMTestCase): - hallucination_metric = HallucinationMetric(minimum_score=0.3) + hallucination_metric = HallucinationMetric(threshold=0.3) assert_test(test_case, [hallucination_metric]) ``` @@ -128,7 +128,7 @@ from deepeval import evaluate from deepeval.metrics import HallucinationMetric from deepeval.dataset import EvaluationDataset -hallucination_metric = HallucinationMetric(minimum_score=0.3) +hallucination_metric = HallucinationMetric(threshold=0.3) # Initialize empty dataset object and pull from Confident dataset = EvaluationDataset() diff --git a/docs/docs/evaluation-datasets.mdx b/docs/docs/evaluation-datasets.mdx index b34503ea5..09fc2bfac 100644 --- a/docs/docs/evaluation-datasets.mdx +++ b/docs/docs/evaluation-datasets.mdx @@ -157,8 +157,8 @@ dataset = EvaluationDataset(test_cases=[...]) dataset, ) def test_customer_chatbot(test_case: LLMTestCase): - hallucination_metric = HallucinationMetric(minimum_score=0.3) - answer_relevancy_metric = AnswerRelevancyMetric(minimum_score=0.5) + hallucination_metric = HallucinationMetric(threshold=0.3) + answer_relevancy_metric = AnswerRelevancyMetric(threshold=0.5) assert_test(test_case, [hallucination_metric, answer_relevancy_metric]) @@ -186,8 +186,8 @@ from deepeval.dataset import EvaluationDataset dataset = EvaluationDataset(test_cases=[...]) -hallucination_metric = HallucinationMetric(minimum_score=0.3) -answer_relevancy_metric = AnswerRelevancyMetric(minimum_score=0.5) +hallucination_metric = HallucinationMetric(threshold=0.3) +answer_relevancy_metric = AnswerRelevancyMetric(threshold=0.5) dataset.evaluate([hallucination_metric, answer_relevancy_metric]) diff --git a/docs/docs/evaluation-test-cases.mdx b/docs/docs/evaluation-test-cases.mdx index e21f8e3c8..270f3be2d 100644 --- a/docs/docs/evaluation-test-cases.mdx +++ b/docs/docs/evaluation-test-cases.mdx @@ -202,7 +202,7 @@ test_case = LLMTestCase( context=context ) -metric = HallucinationMetric(minimum_score=0.7) +metric = HallucinationMetric(threshold=0.7) run_test(test_case, [metric]) ``` @@ -245,7 +245,7 @@ def test_case_1(): expected_output="Me, ruff!", context=context ) - metric = HallucinationMetric(minimum_score=0.7) + metric = HallucinationMetric(threshold=0.7) assert_test(test_case, metrics=[metric]) def test_case_2(): @@ -259,7 +259,7 @@ def test_case_2(): expected_output="Me, ruff!", context=context ) - metric = HallucinationMetric(minimum_score=0.7) + metric = HallucinationMetric(threshold=0.7) assert_test(test_case, metrics=[metric]) ``` @@ -307,7 +307,7 @@ second_test_case = LLMTestCase( test_cases = [first_test_case, second_test_case] -metric = HallucinationMetric(minimum_score=0.7) +metric = HallucinationMetric(threshold=0.7) evaluate(test_cases, [metric]) ``` diff --git a/docs/docs/getting-started.mdx b/docs/docs/getting-started.mdx index a34f15793..93fb66a4e 100644 --- a/docs/docs/getting-started.mdx +++ b/docs/docs/getting-started.mdx @@ -60,7 +60,7 @@ def test_hallucination(): # Replace this with the actual output of your LLM application 
actual_output = "We offer a 30-day full refund at no extra cost." - hallucination_metric = HallucinationMetric(minimum_score=0.7) + hallucination_metric = HallucinationMetric(threshold=0.7) test_case = LLMTestCase(input=input, actual_output=actual_output, context=context) assert_test(test_case, [hallucination_metric]) ``` @@ -74,8 +74,8 @@ deepeval test run test_example.py **Congratulations! Your test case should have passed ✅** Let's breakdown what happened. - The variable `input` mimics a user input, and `actual_output` is a placeholder for what your application's supposed to output based on this input. -- The variable `context` contains the relevant information from your knowledge base, and `HallucinationMetric(minimum_score=0.7)` is an default metric provided by DeepEval for you to evaluate how factually correct your application's output is based on the provided context. -- All metric scores range from 0 - 1, which the `minimum_score=0.7` threshold ultimately determines if your test have passed or not. +- The variable `context` contains the relevant information from your knowledge base, and `HallucinationMetric(threshold=0.7)` is an default metric provided by DeepEval for you to evaluate how factually correct your application's output is based on the provided context. +- All metric scores range from 0 - 1, which the `threshold=0.7` threshold ultimately determines if your test have passed or not. :::note `deepeval`'s default metrics are not evaluated using LLMs. Keep reading this tutorial to learn how to create an LLM based evaluation metric. @@ -112,7 +112,7 @@ def test_summarization(): name="Summarization", criteria="Summarization - determine if the actual output is an accurate and concise summarization of the input.", evaluation_params=[LLMTestCaseParams.INPUT, LLMTestCaseParams.ACTUAL_OUTPUT], - minimum_score=0.5 + threshold=0.5 ) test_case = LLMTestCase(input=input, actual_output=actual_output) assert_test(test_case, [summarization_metric]) @@ -130,10 +130,10 @@ from deepeval.metrics import BaseMetric class LengthMetric(BaseMetric): # This metric checks if the output length is greater than 10 characters def __init__(self, max_length: int=10): - self.minimum_score = max_length + self.threshold = max_length def measure(self, test_case: LLMTestCase): - self.success = len(test_case.actual_output) > self.minimum_score + self.success = len(test_case.actual_output) > self.threshold if self.success: score = 1 else: @@ -184,13 +184,13 @@ def test_everything(): # Replace this with the actual output of your LLM application actual_output = "We offer a 30-day full refund at no extra cost." - hallucination_metric = HallucinationMetric(minimum_score=0.7) + hallucination_metric = HallucinationMetric(threshold=0.7) length_metric = LengthMetric(max_length=10) summarization_metric = LLMEvalMetric( name="Summarization", criteria="Summarization - determine if the actual output is an accurate and concise summarization of the input.", evaluation_params=[LLMTestCaseParams.INPUT, LLMTestCaseParams.ACTUAL_OUTPUT], - minimum_score=0.5 + threshold=0.5 ) test_case = LLMTestCase(input=input, actual_output=actual_output, context=context) @@ -262,7 +262,7 @@ def test_hallucination(test_case: dict): # Replace this with the actual output of your LLM application actual_output = "We offer a 30-day full refund at no extra cost." 
- hallucination_metric = HallucinationMetric(minimum_score=0.7) + hallucination_metric = HallucinationMetric(threshold=0.7) test_case = LLMTestCase( input=input, actual_output=actual_output, diff --git a/docs/docs/metrics-answer-relevancy.mdx b/docs/docs/metrics-answer-relevancy.mdx index 9239f517a..2dc996da7 100644 --- a/docs/docs/metrics-answer-relevancy.mdx +++ b/docs/docs/metrics-answer-relevancy.mdx @@ -28,7 +28,7 @@ actual_output = "We offer a 30-day full refund at no extra cost." retrieval_context = ["All customers are eligible for a 30 day full refund at no extra cost."] metric = AnswerRelevancyMetric( - minimum_score=0.7, + threshold=0.7, model="gpt-4", include_reason=True ) diff --git a/docs/docs/metrics-bias.mdx b/docs/docs/metrics-bias.mdx index 2c766ad6d..4cab96246 100644 --- a/docs/docs/metrics-bias.mdx +++ b/docs/docs/metrics-bias.mdx @@ -39,7 +39,7 @@ actual_output = "We offer a 30-day full refund at no extra cost." metric = UnBiasedMetric( evaluation_params=[LLMTestCaseParams.ACTUAL_OUTPUT], - minimum_score=0.5 + threshold=0.5 ) test_case = LLMTestCase( input="What if these shoes don't fit?", diff --git a/docs/docs/metrics-contextual-precision.mdx b/docs/docs/metrics-contextual-precision.mdx index 695ea5d93..76ed83c42 100644 --- a/docs/docs/metrics-contextual-precision.mdx +++ b/docs/docs/metrics-contextual-precision.mdx @@ -28,7 +28,7 @@ actual_output = "We offer a 30-day full refund at no extra cost." retrieval_context = ["All customers are eligible for a 30 day full refund at no extra cost."] metric = ContextualPrecisionMetric( - minimum_score=0.7, + threshold=0.7, model="gpt-4", include_reason=True ) diff --git a/docs/docs/metrics-contextual-recall.mdx b/docs/docs/metrics-contextual-recall.mdx index b6afe519d..5be7ab128 100644 --- a/docs/docs/metrics-contextual-recall.mdx +++ b/docs/docs/metrics-contextual-recall.mdx @@ -32,7 +32,7 @@ expected_output = "You are eligible for a 30 day full refund at no extra cost." retrieval_context = ["All customers are eligible for a 30 day full refund at no extra cost."] metric = ContextualRecallMetric( - minimum_score=0.7, + threshold=0.7, model="gpt-4", include_reason=True ) diff --git a/docs/docs/metrics-contextual-relevancy.mdx b/docs/docs/metrics-contextual-relevancy.mdx index 2e52fe43d..4fa3604ba 100644 --- a/docs/docs/metrics-contextual-relevancy.mdx +++ b/docs/docs/metrics-contextual-relevancy.mdx @@ -32,7 +32,7 @@ actual_output = "We offer a 30-day full refund at no extra cost." 
retrieval_context = ["All customers are eligible for a 30 day full refund at no extra cost."] metric = ContextualRelevancyMetric( - minimum_score=0.7, + threshold=0.7, model="gpt-4", include_reason=True ) diff --git a/docs/docs/metrics-custom.mdx b/docs/docs/metrics-custom.mdx index 7e24304c9..2cadb8f3e 100644 --- a/docs/docs/metrics-custom.mdx +++ b/docs/docs/metrics-custom.mdx @@ -27,11 +27,11 @@ from deepeval.test_case import LLMTestCase class LengthMetric(BaseMetric): # This metric checks if the output length is greater than 10 characters def __init__(self, max_length: int=10): - self.minimum_score = max_length + self.threshold = max_length def measure(self, test_case: LLMTestCase): # Set self.success and self.score in the "measure" method - self.success = len(test_case.actual_output) > self.minimum_score + self.success = len(test_case.actual_output) > self.threshold if self.success: self.score = 1 else: self.score = 0 @@ -52,7 +52,7 @@ class LengthMetric(BaseMetric): Notice that a few things have happened: -- `self.minimum_score` was set in `__init__()` +- `self.threshold` was set in `__init__()` - `self.success`, `self.score`, and `self.reason` were set in `measure()` - `measure()` takes in an `LLMTestCase` - `self.is_successful()` simply returns the success status diff --git a/docs/docs/metrics-faithfulness.mdx b/docs/docs/metrics-faithfulness.mdx index 6a2f8fefb..efed3eb5f 100644 --- a/docs/docs/metrics-faithfulness.mdx +++ b/docs/docs/metrics-faithfulness.mdx @@ -32,7 +32,7 @@ actual_output = "We offer a 30-day full refund at no extra cost." retrieval_context = ["All customers are eligible for a 30 day full refund at no extra cost."] metric = FaithfulnessMetric( - minimum_score=0.7, + threshold=0.7, model="gpt-4", include_reason=True ) diff --git a/docs/docs/metrics-hallucination.mdx b/docs/docs/metrics-hallucination.mdx index 75b294266..8ed90e46a 100644 --- a/docs/docs/metrics-hallucination.mdx +++ b/docs/docs/metrics-hallucination.mdx @@ -40,7 +40,7 @@ test_case = LLMTestCase( actual_output=actual_output, context=context ) -metric = HallucinationMetric(minimum_score=0.5) +metric = HallucinationMetric(threshold=0.5) metric.measure(test_case) print(metric.score) diff --git a/docs/docs/metrics-introduction.mdx b/docs/docs/metrics-introduction.mdx index f2cb51ecb..40eaf9115 100644 --- a/docs/docs/metrics-introduction.mdx +++ b/docs/docs/metrics-introduction.mdx @@ -32,7 +32,7 @@ A **_custom_** metric is a type of metric you can easily create by implementing - are extra reliable as LLMs are only used for extremely specific tasks during evaluation to greatly reduce stochasticity and flakiness in scores. - provide a comprehensive reason for the scores computed. -All of `deepeval`'s default metrics output a score between 0-1, and require a `threshold` argument to instantiate. A default metric is only successful if the evaluation score is equal to or greater than `threshold`. :::info All GPT models from OpenAI are available for LLM-Evals (metrics that use LLMs for evaluation). You can switch between models by providing a string corresponding to OpenAI's model names via the optional `model` argument when instantiating an LLM-Eval. 
@@ -82,7 +82,7 @@ All metrics in `deepeval`, including [custom metrics that you create](metrics-cu - can have its score accessed via `metric.score` - can have its status accessed via `metric.is_successful()` - can be used to evaluate test cases or entire datasets, with or without Pytest. -- has a `threshold` that acts as the threshold for success. `metric.is_successful()` is only true if `metric.score` >= `threshold`. In addition, most LLM-Evals in `deepeval` offer a reason for their score, which can be accessed via `metric.reason`. @@ -99,8 +99,8 @@ from deepeval.test_case import LLMTestCase # Initialize a test case test_case = LLMTestCase(input="...", actual_output="...") -# Initialize metric with minimum_score -metric = AnswerRelevancyMetric(minimum_score=0.5) +# Initialize metric with threshold +metric = AnswerRelevancyMetric(threshold=0.5) ``` Using this metric, you can either evaluate a test case using `deepeval test run`: diff --git a/docs/docs/metrics-judgemental.mdx b/docs/docs/metrics-judgemental.mdx index 933b1e3d9..cee908375 100644 --- a/docs/docs/metrics-judgemental.mdx +++ b/docs/docs/metrics-judgemental.mdx @@ -39,7 +39,7 @@ code_correctness_metric = JudgementalGPT( criteria="Code Correctness - determine whether the code in the 'actual output' produces a valid JSON.", evaluation_params=[LLMTestCaseParams.ACTUAL_OUTPUT], language=Languages.SPANISH, - minimum_score=0.5, + threshold=0.5, ) ``` @@ -49,7 +49,7 @@ Under the hood, `JudgementalGPT` sends a request to Confident AI's servers that: - `criteria`: a description outlining the specific evaluation aspects for each test case. - `evaluation_params`: a list of type `LLMTestCaseParams`. Include only the parameters that are relevant for evaluation. - [Optional] `language`: type `Language`, specifies what language to return the reasoning in. -- [Optional] `minimum_score`: the passing threshold, defaulted to 0.5. +- [Optional] `threshold`: the passing threshold, defaulted to 0.5. Similar to `LLMEvalMetric`, you can access the judgemental `score` and `reason` for `JudgementalGPT`: diff --git a/docs/docs/metrics-llm-evals.mdx b/docs/docs/metrics-llm-evals.mdx index 814f035eb..a7cdaff7c 100644 --- a/docs/docs/metrics-llm-evals.mdx +++ b/docs/docs/metrics-llm-evals.mdx @@ -38,14 +38,14 @@ There are three mandatory and two optional parameters required when instantiatin - `criteria`: a description outlining the specific evaluation aspects for each test case. - `evaluation_params`: a list of type `LLMTestCaseParams`. Include only the parameters that are relevant for evaluation. - [Optional] `evaluation_steps`: a list of strings outlining the exact steps the LLM should take for evaluation. You can only provide either `evaluation_steps` or `criteria`, and not both. -- [Optional] `minimum_score`: the passing threshold, defaulted to 0.5. +- [Optional] `threshold`: the passing threshold, defaulted to 0.5. - [Optional] `model`: the model name. This is defaulted to 'gpt-4-1106-preview' and we currently only support models from (Azure) OpenAI. :::danger For accurate and valid results, only the parameters that are mentioned in `criteria` should be included as a member of `evaluation_params`. 
::: -As mentioned in the [metrics introduction section](metrics-introduction), all of `deepeval`'s metrics return a score ranging from 0 - 1, and a metric is only successful if the evaluation score is equal to or greater than `threshold`. An `LLMEvalMetric` is no exception. You can access the `score` and `reason` for each individual `LLMEvalMetric`: ```python from deepeval.test_case import LLMTestCase diff --git a/docs/docs/metrics-ragas.mdx b/docs/docs/metrics-ragas.mdx index 0ec7c2441..1fad689d7 100644 --- a/docs/docs/metrics-ragas.mdx +++ b/docs/docs/metrics-ragas.mdx @@ -42,7 +42,7 @@ expected_output = "You are eligible for a 30 day full refund at no extra cost." # Replace this with the actual retrieved context from your RAG pipeline retrieval_context = ["All customers are eligible for a 30 day full refund at no extra cost."] -metric = RagasMetric(minimum_score=0.5, model="gpt-3.5-turbo") +metric = RagasMetric(threshold=0.5, model="gpt-3.5-turbo") test_case = LLMTestCase( input="What if these shoes don't fit?", actual_output=actual_output, diff --git a/docs/docs/metrics-summarization.mdx b/docs/docs/metrics-summarization.mdx index 322b488c8..9b06deca9 100644 --- a/docs/docs/metrics-summarization.mdx +++ b/docs/docs/metrics-summarization.mdx @@ -46,7 +46,7 @@ from deepeval.test_case import LLMTestCase test_case = LLMTestCase(input=input, actual_output=actual_output) metric = SummarizationMetric( - minimum_score=0.5, + threshold=0.5, model="gpt-4", assessment_questions=[ "Is the inclusion score based on a percentage of 'yes' answers?", @@ -64,7 +64,7 @@ evaluate([test_case], [metric]) ``` There are five optional parameters when instantiating a `SummarizationMetric` class: -- [Optional] `threshold`: the passing threshold, defaulted to 0.5. - [Optional] `model`: the model name. This is defaulted to 'gpt-4-1106-preview' and we currently only support models from (Azure) OpenAI. - [Optional] `assessment_questions`: a list of **close-ended questions that can be answered with either a 'yes' or a 'no'**. These are questions you want your summary to be able to ideally answer, and are especially helpful if you already know what a good summary for your use case looks like. If `assessment_questions` is not provided, we will generate a set of `assessment_questions` for you at evaluation time. The `assessment_questions` are used to calculate the `inclusion_score`. - [Optional] `n`: the number of questions to generate when calculating the `alignment_score` and `inclusion_score`, defaulted to 5. diff --git a/docs/docs/metrics-toxicity.mdx b/docs/docs/metrics-toxicity.mdx index 0af3e030a..8ab957d52 100644 --- a/docs/docs/metrics-toxicity.mdx +++ b/docs/docs/metrics-toxicity.mdx @@ -35,7 +35,7 @@ actual_output = "We offer a 30-day full refund at no extra cost." 
diff --git a/docs/docs/metrics-toxicity.mdx b/docs/docs/metrics-toxicity.mdx
index 0af3e030a..8ab957d52 100644
--- a/docs/docs/metrics-toxicity.mdx
+++ b/docs/docs/metrics-toxicity.mdx
@@ -35,7 +35,7 @@ actual_output = "We offer a 30-day full refund at no extra cost."
 metric = UnBiasedMetric(
     evaluation_params=[LLMTestCaseParams.INPUT, LLMTestCaseParams.ACTUAL_OUTPUT],
-    minimum_score=0.5
+    threshold=0.5
 )
 test_case = LLMTestCase(
     input="What if these shoes don't fit?",
diff --git a/examples/getting_started/test_example.py b/examples/getting_started/test_example.py
index d910a2526..ff66655f5 100644
--- a/examples/getting_started/test_example.py
+++ b/examples/getting_started/test_example.py
@@ -15,7 +15,7 @@ def test_hallucination():
     # Replace this with the actual output from your LLM application
     actual_output = "We offer a 30-day full refund at no extra cost."

-    hallucination_metric = HallucinationMetric(minimum_score=0.7)
+    hallucination_metric = HallucinationMetric(threshold=0.7)
     test_case = LLMTestCase(
         input=input, actual_output=actual_output, context=context
     )
@@ -35,7 +35,7 @@ def test_summarization():
             LLMTestCaseParams.INPUT,
             LLMTestCaseParams.ACTUAL_OUTPUT,
         ],
-        minimum_score=0.5,
+        threshold=0.5,
     )
     test_case = LLMTestCase(input=input, actual_output=actual_output)
     assert_test(test_case, [summarization_metric])
@@ -44,10 +44,10 @@ class LengthMetric(BaseMetric):
     # This metric checks if the output length is greater than 10 characters
     def __init__(self, max_length: int = 10):
-        self.minimum_score = max_length
+        self.threshold = max_length

     def measure(self, test_case: LLMTestCase):
-        self.success = len(test_case.actual_output) > self.minimum_score
+        self.success = len(test_case.actual_output) > self.threshold
         if self.success:
             self.score = 1
         else:
@@ -80,7 +80,7 @@ def test_everything():
     # Replace this with the actual output from your LLM application
     actual_output = "We offer a 30-day full refund at no extra cost."

-    hallucination_metric = HallucinationMetric(minimum_score=0.7)
+    hallucination_metric = HallucinationMetric(threshold=0.7)
     length_metric = LengthMetric(max_length=10)
     summarization_metric = LLMEvalMetric(
         name="Summarization",
@@ -89,7 +89,7 @@
             LLMTestCaseParams.INPUT,
             LLMTestCaseParams.ACTUAL_OUTPUT,
         ],
-        minimum_score=0.5,
+        threshold=0.5,
     )

     test_case = LLMTestCase(
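The `LengthMetric` hunks above show only the renamed lines. Pieced together, the custom-metric pattern they rely on looks roughly like this; the sketch assumes `BaseMetric` is exported from `deepeval.metrics` and that `is_successful()` simply reports the flag set by `measure()`:

```python
from deepeval.metrics import BaseMetric  # assumed import path
from deepeval.test_case import LLMTestCase


class LengthMetric(BaseMetric):
    # Custom metrics now keep their passing bar on self.threshold
    # (previously self.minimum_score).
    def __init__(self, max_length: int = 10):
        self.threshold = max_length

    def measure(self, test_case: LLMTestCase) -> float:
        # measure() is responsible for setting both self.success and self.score
        self.success = len(test_case.actual_output) > self.threshold
        self.score = 1 if self.success else 0
        return self.score

    def is_successful(self) -> bool:
        return self.success
```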
diff --git a/examples/tracing/test_chatbot.py b/examples/tracing/test_chatbot.py
index 62177cf5d..c8a55ee8b 100644
--- a/examples/tracing/test_chatbot.py
+++ b/examples/tracing/test_chatbot.py
@@ -83,7 +83,7 @@ def test_hallucination():
     ]
     input = "What are the requirements to be president?"

-    metric = HallucinationMetric(minimum_score=0.8)
+    metric = HallucinationMetric(threshold=0.8)
     test_case = LLMTestCase(
         input=input,
         actual_output=chatbot.query(user_input=input),
diff --git a/tests/test_answer_relevancy.py b/tests/test_answer_relevancy.py
index 8d828cc82..2863bf091 100644
--- a/tests/test_answer_relevancy.py
+++ b/tests/test_answer_relevancy.py
@@ -47,7 +47,7 @@

 @pytest.mark.skip(reason="openai is expensive")
 def test_answer_relevancy():
-    metric = AnswerRelevancyMetric(minimum_score=0.5)
+    metric = AnswerRelevancyMetric(threshold=0.5)
     test_case = LLMTestCase(
         input=question,
         actual_output=answer,
diff --git a/tests/test_contextual_precision.py b/tests/test_contextual_precision.py
index 6bd36a189..0916f167a 100644
--- a/tests/test_contextual_precision.py
+++ b/tests/test_contextual_precision.py
@@ -61,7 +61,7 @@

 @pytest.mark.skip(reason="openai is expensive")
 def test_contextual_precision():
-    metric = ContextualPrecisionMetric(minimum_score=0.5)
+    metric = ContextualPrecisionMetric(threshold=0.5)
     test_case = LLMTestCase(
         input=question,
         actual_output=answer,
diff --git a/tests/test_custom_metric.py b/tests/test_custom_metric.py
index f9e75db0a..407b378e4 100644
--- a/tests/test_custom_metric.py
+++ b/tests/test_custom_metric.py
@@ -9,14 +9,14 @@ class LengthMetric(BaseMetric):
     """This metric checks if the output is more than 3 letters"""

-    def __init__(self, minimum_score: int = 3):
-        self.minimum_score = minimum_score
+    def __init__(self, threshold: int = 3):
+        self.threshold = threshold

     def measure(self, test_case: LLMTestCase):
         # sends to server
         text = test_case.actual_output
         score = len(text)
-        self.success = score > self.minimum_score
+        self.success = score > self.threshold
         # Optional: Logs it to the server
         return score

diff --git a/tests/test_dataset.py b/tests/test_dataset.py
index 770bb05fc..d99ba8e98 100644
--- a/tests/test_dataset.py
+++ b/tests/test_dataset.py
@@ -41,5 +41,5 @@ def test_create_dataset():
 #     dataset,
 # )
 # def test_customer_chatbot(test_case: LLMTestCase):
-#     hallucination_metric = HallucinationMetric(minimum_score=0.3)
+#     hallucination_metric = HallucinationMetric(threshold=0.3)
 #     assert_test(test_case, [hallucination_metric])
diff --git a/tests/test_hallucination_metric.py b/tests/test_hallucination_metric.py
index c37518785..13560d23c 100644
--- a/tests/test_hallucination_metric.py
+++ b/tests/test_hallucination_metric.py
@@ -5,7 +5,7 @@

 def test_hallucination_metric():
-    metric = HallucinationMetric(minimum_score=0.5)
+    metric = HallucinationMetric(threshold=0.5)
     test_case = LLMTestCase(
         input="placeholder",
         actual_output="A blond drinking water in public.",
@@ -17,7 +17,7 @@ def test_hallucination_metric():

 def test_hallucination_metric_2():
-    metric = HallucinationMetric(minimum_score=0.6)
+    metric = HallucinationMetric(threshold=0.6)
     test_case = LLMTestCase(
         input="placeholder",
         actual_output="Python is a programming language.",
@@ -28,7 +28,7 @@ def test_hallucination_metric_2():

 def test_hallucination_metric_3():
-    metric = HallucinationMetric(minimum_score=0.6)
+    metric = HallucinationMetric(threshold=0.6)
     test_case = LLMTestCase(
         input="placeholder",
         actual_output="Python is a programming language.",
diff --git a/tests/test_judgemental.py b/tests/test_judgemental.py
index 3b2181655..f7f8bfd62 100644
--- a/tests/test_judgemental.py
+++ b/tests/test_judgemental.py
@@ -20,7 +20,7 @@ def test_judgemntal():
             LLMTestCaseParams.INPUT,
         ],
         language=Languages.SPANISH,
-        minimum_score=0.5,
+        threshold=0.5,
     )

     assert_test(test_case, [metric])
diff --git a/tests/test_llm_metric.py b/tests/test_llm_metric.py
index 4183ee2ff..54f34fb46 100644
--- a/tests/test_llm_metric.py
+++ b/tests/test_llm_metric.py
@@ -10,7 +10,7 @@ def test_chat_completion():
     metric = LLMEvalMetric(
         name="Validity",
         criteria="The response is a valid response to the prompt.",
-        minimum_score=0.5,
+        threshold=0.5,
         evaluation_params=[
             LLMTestCaseParams.INPUT,
             LLMTestCaseParams.ACTUAL_OUTPUT,
@@ -41,7 +41,7 @@ def test_chat_completion():
 # metric = LLMEvalMetric(
 #     name="Validity",
 #     criteria="The response is a valid response to the prompt.",
-#     minimum_score=0.5,
+#     threshold=0.5,
 #     evaluation_params=[
 #         LLMTestCaseParams.INPUT,
 #         LLMTestCaseParams.ACTUAL_OUTPUT,
diff --git a/tests/test_quickstart.py b/tests/test_quickstart.py
index 504cb2b87..d33a71148 100644
--- a/tests/test_quickstart.py
+++ b/tests/test_quickstart.py
@@ -19,7 +19,7 @@ def test_llm_output():
     test_case = LLMTestCase(
         input=input, actual_output=generate_llm_output(input), context=context
     )
-    assert_test(test_case, [HallucinationMetric(minimum_score=0.5)])
+    assert_test(test_case, [HallucinationMetric(threshold=0.5)])


 def test_llm_output_custom():
@@ -29,7 +29,7 @@ def test_llm_output_custom():
         input="Placeholder", actual_output=actual_output, context=context
     )
     with pytest.raises(AssertionError):
-        assert_test(test_case, [HallucinationMetric(minimum_score=0.5)])
+        assert_test(test_case, [HallucinationMetric(threshold=0.5)])


 def test_0():
diff --git a/tests/test_ragas.py b/tests/test_ragas.py
index a2e38d306..caa93a868 100644
--- a/tests/test_ragas.py
+++ b/tests/test_ragas.py
@@ -2,17 +2,17 @@
 from deepeval.test_case import LLMTestCase
 from deepeval.metrics import (
     RagasMetric,
-    ContextualPrecisionMetric,
-    ContextualRelevancyMetric,
+    RAGASContextualPrecisionMetric,
+    RAGASContextualRelevancyMetric,
     RAGASFaithfulnessMetric,
-    ContextualRecallMetric,
+    RAGASContextualRecallMetric,
     ConcisenessMetric,
     CorrectnessMetric,
     CoherenceMetric,
     MaliciousnessMetric,
+    RAGASAnswerRelevancyMetric,
 )
-from deepeval.metrics.ragas_metric import RAGASAnswerRelevancyMetric
-from deepeval import assert_test, evaluate
+from deepeval import assert_test

 query = "Who won the FIFA World Cup in 2018 and what was the score?"
output = "Winners of the FIFA world cup were the French national football team" @@ -51,28 +51,28 @@ def test_everything(): retrieval_context=context, context=context, ) - # metric1 = ContextualRelevancyMetric(model="gpt-4") - # metric2 = RAGASFaithfulnessMetric(model="gpt-4") - # metric3 = ContextualRecallMetric(model="gpt-4") - # metric4 = ConcisenessMetric(model="gpt-4") - # metric5 = CorrectnessMetric(model="gpt-4") - # metric6 = CoherenceMetric(model="gpt-4") - # metric7 = MaliciousnessMetric(model="gpt-4") - # metric8 = RAGASAnswerRelevancyMetric(model="gpt-4") - metric9 = ContextualPrecisionMetric() - # metric10 = RagasMetric() + metric1 = RAGASContextualRelevancyMetric(model="gpt-4") + metric2 = RAGASFaithfulnessMetric(model="gpt-4") + metric3 = RAGASContextualRecallMetric(model="gpt-4") + metric4 = ConcisenessMetric(model="gpt-4") + metric5 = CorrectnessMetric(model="gpt-4") + metric6 = CoherenceMetric(model="gpt-4") + metric7 = MaliciousnessMetric(model="gpt-4") + metric8 = RAGASAnswerRelevancyMetric(model="gpt-4") + metric9 = RAGASContextualPrecisionMetric() + metric10 = RagasMetric() assert_test( test_case, [ - # metric1, - # metric2, - # metric3, - # metric4, - # metric5, - # metric6, - # metric7, - # metric8, + metric1, + metric2, + metric3, + metric4, + metric5, + metric6, + metric7, + metric8, metric9, - # metric10, + metric10, ], ) From 1e5fbab87d58ac2fbc8f5b5fb4ae59688b5ad350 Mon Sep 17 00:00:00 2001 From: Jeffrey Ip Date: Thu, 11 Jan 2024 14:49:41 -0800 Subject: [PATCH 05/15] Fix langchain chat models --- deepeval/metrics/__init__.py | 1 + deepeval/models/gpt_model.py | 20 +- poetry.lock | 518 +++++++++++++++++++---------------- pyproject.toml | 1 + setup.py | 1 + tests/test_faithfulness.py | 2 +- tests/test_ragas.py | 48 ++-- 7 files changed, 316 insertions(+), 275 deletions(-) diff --git a/deepeval/metrics/__init__.py b/deepeval/metrics/__init__.py index 51a7dd2b5..619d75ea0 100644 --- a/deepeval/metrics/__init__.py +++ b/deepeval/metrics/__init__.py @@ -11,6 +11,7 @@ from .contextual_precision import ContextualPrecisionMetric from .ragas_metric import ( RagasMetric, + RAGASAnswerRelevancyMetric, RAGASFaithfulnessMetric, RAGASContextualRecallMetric, RAGASContextualRelevancyMetric, diff --git a/deepeval/models/gpt_model.py b/deepeval/models/gpt_model.py index 6ca7cda8a..49637c8b7 100644 --- a/deepeval/models/gpt_model.py +++ b/deepeval/models/gpt_model.py @@ -1,7 +1,7 @@ import os from typing import Dict, Optional -from langchain.chat_models import ChatOpenAI, AzureChatOpenAI +from langchain_openai import ChatOpenAI, AzureChatOpenAI from deepeval.key_handler import KeyValues, KEY_FILE_HANDLER from deepeval.models.base import DeepEvalBaseModel from deepeval.chat_completion.retry import retry_with_exponential_backoff @@ -43,13 +43,6 @@ def __init__( def load_model(self): if self.should_use_azure_openai(): - model_version = KEY_FILE_HANDLER.fetch_data( - KeyValues.AZURE_MODEL_VERSION - ) - model_kwargs = {} - if model_version is not None: - model_kwargs["model_version"] = model_version - openai_api_key = KEY_FILE_HANDLER.fetch_data( KeyValues.AZURE_OPENAI_API_KEY ) @@ -63,13 +56,22 @@ def load_model(self): azure_endpoint = KEY_FILE_HANDLER.fetch_data( KeyValues.AZURE_OPENAI_ENDPOINT ) + + model_version = KEY_FILE_HANDLER.fetch_data( + KeyValues.AZURE_MODEL_VERSION + ) + + if model_version is None: + model_version = "" + return AzureChatOpenAI( openai_api_version=openai_api_version, azure_deployment=azure_deployment, azure_endpoint=azure_endpoint, 
openai_api_key=openai_api_key, - model_kwargs=model_kwargs, + model_version=model_version, ) + return ChatOpenAI( model_name=self.model_name, model_kwargs=self.model_kwargs ) diff --git a/poetry.lock b/poetry.lock index 34c10e78c..ac4bcea85 100644 --- a/poetry.lock +++ b/poetry.lock @@ -693,53 +693,53 @@ typing = ["typing-extensions (>=4.8)"] [[package]] name = "fonttools" -version = "4.47.0" +version = "4.47.2" description = "Tools to manipulate font files" optional = false python-versions = ">=3.8" files = [ - {file = "fonttools-4.47.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2d2404107626f97a221dc1a65b05396d2bb2ce38e435f64f26ed2369f68675d9"}, - {file = "fonttools-4.47.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c01f409be619a9a0f5590389e37ccb58b47264939f0e8d58bfa1f3ba07d22671"}, - {file = "fonttools-4.47.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d986b66ff722ef675b7ee22fbe5947a41f60a61a4da15579d5e276d897fbc7fa"}, - {file = "fonttools-4.47.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8acf6dd0434b211b3bd30d572d9e019831aae17a54016629fa8224783b22df8"}, - {file = "fonttools-4.47.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:495369c660e0c27233e3c572269cbe520f7f4978be675f990f4005937337d391"}, - {file = "fonttools-4.47.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c59227d7ba5b232281c26ae04fac2c73a79ad0e236bca5c44aae904a18f14faf"}, - {file = "fonttools-4.47.0-cp310-cp310-win32.whl", hash = "sha256:59a6c8b71a245800e923cb684a2dc0eac19c56493e2f896218fcf2571ed28984"}, - {file = "fonttools-4.47.0-cp310-cp310-win_amd64.whl", hash = "sha256:52c82df66201f3a90db438d9d7b337c7c98139de598d0728fb99dab9fd0495ca"}, - {file = "fonttools-4.47.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:854421e328d47d70aa5abceacbe8eef231961b162c71cbe7ff3f47e235e2e5c5"}, - {file = "fonttools-4.47.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:511482df31cfea9f697930f61520f6541185fa5eeba2fa760fe72e8eee5af88b"}, - {file = "fonttools-4.47.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce0e2c88c8c985b7b9a7efcd06511fb0a1fe3ddd9a6cd2895ef1dbf9059719d7"}, - {file = "fonttools-4.47.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7a0a8848726956e9d9fb18c977a279013daadf0cbb6725d2015a6dd57527992"}, - {file = "fonttools-4.47.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e869da810ae35afb3019baa0d0306cdbab4760a54909c89ad8904fa629991812"}, - {file = "fonttools-4.47.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dd23848f877c3754f53a4903fb7a593ed100924f9b4bff7d5a4e2e8a7001ae11"}, - {file = "fonttools-4.47.0-cp311-cp311-win32.whl", hash = "sha256:bf1810635c00f7c45d93085611c995fc130009cec5abdc35b327156aa191f982"}, - {file = "fonttools-4.47.0-cp311-cp311-win_amd64.whl", hash = "sha256:61df4dee5d38ab65b26da8efd62d859a1eef7a34dcbc331299a28e24d04c59a7"}, - {file = "fonttools-4.47.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e3f4d61f3a8195eac784f1d0c16c0a3105382c1b9a74d99ac4ba421da39a8826"}, - {file = "fonttools-4.47.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:174995f7b057e799355b393e97f4f93ef1f2197cbfa945e988d49b2a09ecbce8"}, - {file = "fonttools-4.47.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea592e6a09b71cb7a7661dd93ac0b877a6228e2d677ebacbad0a4d118494c86d"}, - {file = 
"fonttools-4.47.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40bdbe90b33897d9cc4a39f8e415b0fcdeae4c40a99374b8a4982f127ff5c767"}, - {file = "fonttools-4.47.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:843509ae9b93db5aaf1a6302085e30bddc1111d31e11d724584818f5b698f500"}, - {file = "fonttools-4.47.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9acfa1cdc479e0dde528b61423855913d949a7f7fe09e276228298fef4589540"}, - {file = "fonttools-4.47.0-cp312-cp312-win32.whl", hash = "sha256:66c92ec7f95fd9732550ebedefcd190a8d81beaa97e89d523a0d17198a8bda4d"}, - {file = "fonttools-4.47.0-cp312-cp312-win_amd64.whl", hash = "sha256:e8fa20748de55d0021f83754b371432dca0439e02847962fc4c42a0e444c2d78"}, - {file = "fonttools-4.47.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c75e19971209fbbce891ebfd1b10c37320a5a28e8d438861c21d35305aedb81c"}, - {file = "fonttools-4.47.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e79f1a3970d25f692bbb8c8c2637e621a66c0d60c109ab48d4a160f50856deff"}, - {file = "fonttools-4.47.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:562681188c62c024fe2c611b32e08b8de2afa00c0c4e72bed47c47c318e16d5c"}, - {file = "fonttools-4.47.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a77a60315c33393b2bd29d538d1ef026060a63d3a49a9233b779261bad9c3f71"}, - {file = "fonttools-4.47.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b4fabb8cc9422efae1a925160083fdcbab8fdc96a8483441eb7457235df625bd"}, - {file = "fonttools-4.47.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2a78dba8c2a1e9d53a0fb5382979f024200dc86adc46a56cbb668a2249862fda"}, - {file = "fonttools-4.47.0-cp38-cp38-win32.whl", hash = "sha256:e6b968543fde4119231c12c2a953dcf83349590ca631ba8216a8edf9cd4d36a9"}, - {file = "fonttools-4.47.0-cp38-cp38-win_amd64.whl", hash = "sha256:4a9a51745c0439516d947480d4d884fa18bd1458e05b829e482b9269afa655bc"}, - {file = "fonttools-4.47.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:62d8ddb058b8e87018e5dc26f3258e2c30daad4c87262dfeb0e2617dd84750e6"}, - {file = "fonttools-4.47.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5dde0eab40faaa5476133123f6a622a1cc3ac9b7af45d65690870620323308b4"}, - {file = "fonttools-4.47.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4da089f6dfdb822293bde576916492cd708c37c2501c3651adde39804630538"}, - {file = "fonttools-4.47.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:253bb46bab970e8aae254cebf2ae3db98a4ef6bd034707aa68a239027d2b198d"}, - {file = "fonttools-4.47.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1193fb090061efa2f9e2d8d743ae9850c77b66746a3b32792324cdce65784154"}, - {file = "fonttools-4.47.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:084511482dd265bce6dca24c509894062f0117e4e6869384d853f46c0e6d43be"}, - {file = "fonttools-4.47.0-cp39-cp39-win32.whl", hash = "sha256:97620c4af36e4c849e52661492e31dc36916df12571cb900d16960ab8e92a980"}, - {file = "fonttools-4.47.0-cp39-cp39-win_amd64.whl", hash = "sha256:e77bdf52185bdaf63d39f3e1ac3212e6cfa3ab07d509b94557a8902ce9c13c82"}, - {file = "fonttools-4.47.0-py3-none-any.whl", hash = "sha256:d6477ba902dd2d7adda7f0fd3bfaeb92885d45993c9e1928c9f28fc3961415f7"}, - {file = "fonttools-4.47.0.tar.gz", hash = "sha256:ec13a10715eef0e031858c1c23bfaee6cba02b97558e4a7bfa089dba4a8c2ebf"}, + {file = "fonttools-4.47.2-cp310-cp310-macosx_10_9_universal2.whl", hash = 
"sha256:3b629108351d25512d4ea1a8393a2dba325b7b7d7308116b605ea3f8e1be88df"}, + {file = "fonttools-4.47.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c19044256c44fe299d9a73456aabee4b4d06c6b930287be93b533b4737d70aa1"}, + {file = "fonttools-4.47.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8be28c036b9f186e8c7eaf8a11b42373e7e4949f9e9f370202b9da4c4c3f56c"}, + {file = "fonttools-4.47.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f83a4daef6d2a202acb9bf572958f91cfde5b10c8ee7fb1d09a4c81e5d851fd8"}, + {file = "fonttools-4.47.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4a5a5318ba5365d992666ac4fe35365f93004109d18858a3e18ae46f67907670"}, + {file = "fonttools-4.47.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8f57ecd742545362a0f7186774b2d1c53423ed9ece67689c93a1055b236f638c"}, + {file = "fonttools-4.47.2-cp310-cp310-win32.whl", hash = "sha256:a1c154bb85dc9a4cf145250c88d112d88eb414bad81d4cb524d06258dea1bdc0"}, + {file = "fonttools-4.47.2-cp310-cp310-win_amd64.whl", hash = "sha256:3e2b95dce2ead58fb12524d0ca7d63a63459dd489e7e5838c3cd53557f8933e1"}, + {file = "fonttools-4.47.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:29495d6d109cdbabe73cfb6f419ce67080c3ef9ea1e08d5750240fd4b0c4763b"}, + {file = "fonttools-4.47.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0a1d313a415eaaba2b35d6cd33536560deeebd2ed758b9bfb89ab5d97dc5deac"}, + {file = "fonttools-4.47.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90f898cdd67f52f18049250a6474185ef6544c91f27a7bee70d87d77a8daf89c"}, + {file = "fonttools-4.47.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3480eeb52770ff75140fe7d9a2ec33fb67b07efea0ab5129c7e0c6a639c40c70"}, + {file = "fonttools-4.47.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0255dbc128fee75fb9be364806b940ed450dd6838672a150d501ee86523ac61e"}, + {file = "fonttools-4.47.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f791446ff297fd5f1e2247c188de53c1bfb9dd7f0549eba55b73a3c2087a2703"}, + {file = "fonttools-4.47.2-cp311-cp311-win32.whl", hash = "sha256:740947906590a878a4bde7dd748e85fefa4d470a268b964748403b3ab2aeed6c"}, + {file = "fonttools-4.47.2-cp311-cp311-win_amd64.whl", hash = "sha256:63fbed184979f09a65aa9c88b395ca539c94287ba3a364517698462e13e457c9"}, + {file = "fonttools-4.47.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:4ec558c543609e71b2275c4894e93493f65d2f41c15fe1d089080c1d0bb4d635"}, + {file = "fonttools-4.47.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e040f905d542362e07e72e03612a6270c33d38281fd573160e1003e43718d68d"}, + {file = "fonttools-4.47.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6dd58cc03016b281bd2c74c84cdaa6bd3ce54c5a7f47478b7657b930ac3ed8eb"}, + {file = "fonttools-4.47.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32ab2e9702dff0dd4510c7bb958f265a8d3dd5c0e2547e7b5f7a3df4979abb07"}, + {file = "fonttools-4.47.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3a808f3c1d1df1f5bf39be869b6e0c263570cdafb5bdb2df66087733f566ea71"}, + {file = "fonttools-4.47.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ac71e2e201df041a2891067dc36256755b1229ae167edbdc419b16da78732c2f"}, + {file = "fonttools-4.47.2-cp312-cp312-win32.whl", hash = "sha256:69731e8bea0578b3c28fdb43dbf95b9386e2d49a399e9a4ad736b8e479b08085"}, + {file = "fonttools-4.47.2-cp312-cp312-win_amd64.whl", hash = 
"sha256:b3e1304e5f19ca861d86a72218ecce68f391646d85c851742d265787f55457a4"}, + {file = "fonttools-4.47.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:254d9a6f7be00212bf0c3159e0a420eb19c63793b2c05e049eb337f3023c5ecc"}, + {file = "fonttools-4.47.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eabae77a07c41ae0b35184894202305c3ad211a93b2eb53837c2a1143c8bc952"}, + {file = "fonttools-4.47.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a86a5ab2873ed2575d0fcdf1828143cfc6b977ac448e3dc616bb1e3d20efbafa"}, + {file = "fonttools-4.47.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13819db8445a0cec8c3ff5f243af6418ab19175072a9a92f6cc8ca7d1452754b"}, + {file = "fonttools-4.47.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4e743935139aa485fe3253fc33fe467eab6ea42583fa681223ea3f1a93dd01e6"}, + {file = "fonttools-4.47.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d49ce3ea7b7173faebc5664872243b40cf88814ca3eb135c4a3cdff66af71946"}, + {file = "fonttools-4.47.2-cp38-cp38-win32.whl", hash = "sha256:94208ea750e3f96e267f394d5588579bb64cc628e321dbb1d4243ffbc291b18b"}, + {file = "fonttools-4.47.2-cp38-cp38-win_amd64.whl", hash = "sha256:0f750037e02beb8b3569fbff701a572e62a685d2a0e840d75816592280e5feae"}, + {file = "fonttools-4.47.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3d71606c9321f6701642bd4746f99b6089e53d7e9817fc6b964e90d9c5f0ecc6"}, + {file = "fonttools-4.47.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:86e0427864c6c91cf77f16d1fb9bf1bbf7453e824589e8fb8461b6ee1144f506"}, + {file = "fonttools-4.47.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a00bd0e68e88987dcc047ea31c26d40a3c61185153b03457956a87e39d43c37"}, + {file = "fonttools-4.47.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5d77479fb885ef38a16a253a2f4096bc3d14e63a56d6246bfdb56365a12b20c"}, + {file = "fonttools-4.47.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5465df494f20a7d01712b072ae3ee9ad2887004701b95cb2cc6dcb9c2c97a899"}, + {file = "fonttools-4.47.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4c811d3c73b6abac275babb8aa439206288f56fdb2c6f8835e3d7b70de8937a7"}, + {file = "fonttools-4.47.2-cp39-cp39-win32.whl", hash = "sha256:5b60e3afa9635e3dfd3ace2757039593e3bd3cf128be0ddb7a1ff4ac45fa5a50"}, + {file = "fonttools-4.47.2-cp39-cp39-win_amd64.whl", hash = "sha256:7ee48bd9d6b7e8f66866c9090807e3a4a56cf43ffad48962725a190e0dd774c8"}, + {file = "fonttools-4.47.2-py3-none-any.whl", hash = "sha256:7eb7ad665258fba68fd22228a09f347469d95a97fb88198e133595947a20a184"}, + {file = "fonttools-4.47.2.tar.gz", hash = "sha256:7df26dd3650e98ca45f1e29883c96a0b9f5bb6af8d632a6a108bc744fa0bd9b3"}, ] [package.extras] @@ -1010,13 +1010,13 @@ socks = ["socksio (==1.*)"] [[package]] name = "huggingface-hub" -version = "0.20.1" +version = "0.20.2" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.20.1-py3-none-any.whl", hash = "sha256:ecfdea395a8bc68cd160106c5bd857f7e010768d95f9e1862a779010cc304831"}, - {file = "huggingface_hub-0.20.1.tar.gz", hash = "sha256:8c88c4c3c8853e22f2dfb4d84c3d493f4e1af52fb3856a90e1eeddcf191ddbb1"}, + {file = "huggingface_hub-0.20.2-py3-none-any.whl", hash = "sha256:53752eda2239d30a470c307a61cf9adcf136bc77b0a734338c7d04941af560d8"}, + {file = "huggingface_hub-0.20.2.tar.gz", hash = 
"sha256:215c5fceff631030c7a3d19ba7b588921c908b3f21eef31d160ebc245b200ff6"}, ] [package.dependencies] @@ -1064,13 +1064,13 @@ files = [ [[package]] name = "jinja2" -version = "3.1.2" +version = "3.1.3" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" files = [ - {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, - {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, + {file = "Jinja2-3.1.3-py3-none-any.whl", hash = "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa"}, + {file = "Jinja2-3.1.3.tar.gz", hash = "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"}, ] [package.dependencies] @@ -1230,13 +1230,13 @@ files = [ [[package]] name = "langchain" -version = "0.0.353" +version = "0.1.0" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.8.1,<4.0" files = [ - {file = "langchain-0.0.353-py3-none-any.whl", hash = "sha256:54cac8b74fbefacddcdf0c443619a7331d6b59fe94fa2a48a4d7da2b59cf1f63"}, - {file = "langchain-0.0.353.tar.gz", hash = "sha256:a095ea819f13a3606ced699182a8369eb2d77034ec8c913983675d6dd9a98196"}, + {file = "langchain-0.1.0-py3-none-any.whl", hash = "sha256:8652e74b039333a55c79faff4400b077ba1bd0ddce5255574e42d301c05c1733"}, + {file = "langchain-0.1.0.tar.gz", hash = "sha256:d43119f8d3fda2c8ddf8c3a19bd5b94b347e27d1867ff14a921b90bdbed0668a"}, ] [package.dependencies] @@ -1244,9 +1244,9 @@ aiohttp = ">=3.8.3,<4.0.0" async-timeout = {version = ">=4.0.0,<5.0.0", markers = "python_version < \"3.11\""} dataclasses-json = ">=0.5.7,<0.7" jsonpatch = ">=1.33,<2.0" -langchain-community = ">=0.0.2,<0.1" -langchain-core = ">=0.1.4,<0.2" -langsmith = ">=0.0.70,<0.1.0" +langchain-community = ">=0.0.9,<0.1" +langchain-core = ">=0.1.7,<0.2" +langsmith = ">=0.0.77,<0.1.0" numpy = ">=1,<2" pydantic = ">=1,<3" PyYAML = ">=5.3" @@ -1261,7 +1261,7 @@ cli = ["typer (>=0.9.0,<0.10.0)"] cohere = ["cohere (>=4,<5)"] docarray = ["docarray[hnswlib] (>=0.32.0,<0.33.0)"] embeddings = ["sentence-transformers (>=2,<3)"] -extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<5)", "couchbase (>=4.1.9,<5.0.0)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.2,<5.0.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw 
(>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"] +extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<5)", "couchbase (>=4.1.9,<5.0.0)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "langchain-openai (>=0.0.2,<0.1)", "lxml (>=4.9.2,<5.0.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"] javascript = ["esprima (>=4.0.1,<5.0.0)"] llms = ["clarifai (>=9.1.0)", "cohere (>=4,<5)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (<2)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"] openai = ["openai (<2)", "tiktoken (>=0.3.2,<0.6.0)"] @@ -1270,19 +1270,19 @@ text-helpers = ["chardet (>=5.1.0,<6.0.0)"] [[package]] name = "langchain-community" -version = "0.0.7" +version = "0.0.11" description = "Community contributed LangChain integrations." 
optional = false python-versions = ">=3.8.1,<4.0" files = [ - {file = "langchain_community-0.0.7-py3-none-any.whl", hash = "sha256:468af187bfffe753426cc4548132824be7df9404d38ceef2f873087290d8ff0e"}, - {file = "langchain_community-0.0.7.tar.gz", hash = "sha256:cfbeb25cac7dff3c021f3c82aa243fc80f80082d6f6fdcc79daf36b1408828cc"}, + {file = "langchain_community-0.0.11-py3-none-any.whl", hash = "sha256:30ab1d7dbf35d0ebe684d8a1e8964e8dedd3d31a3703790436b39674cfa06f41"}, + {file = "langchain_community-0.0.11.tar.gz", hash = "sha256:eaeaa8d63427ecf0cb32fe2f1ba4d05ad6d5ef9f7019baf21dc2dde5b1403002"}, ] [package.dependencies] aiohttp = ">=3.8.3,<4.0.0" dataclasses-json = ">=0.5.7,<0.7" -langchain-core = ">=0.1,<0.2" +langchain-core = ">=0.1.8,<0.2" langsmith = ">=0.0.63,<0.1.0" numpy = ">=1,<2" PyYAML = ">=5.3" @@ -1292,17 +1292,17 @@ tenacity = ">=8.1.0,<9.0.0" [package.extras] cli = ["typer (>=0.9.0,<0.10.0)"] -extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-documentintelligence (>=1.0.0b1,<2.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<5)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "gradientai (>=1.4.0,<2.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.2,<5.0.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "oracle-ads (>=2.9.1,<3.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"] +extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-documentintelligence (>=1.0.0b1,<2.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<5)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "esprima 
(>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "gradientai (>=1.4.0,<2.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.2,<5.0.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "oracle-ads (>=2.9.1,<3.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)", "zhipuai (>=1.0.7,<2.0.0)"] [[package]] name = "langchain-core" -version = "0.1.4" +version = "0.1.10" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.8.1,<4.0" files = [ - {file = "langchain_core-0.1.4-py3-none-any.whl", hash = "sha256:c62bd362d5abf5359436a99b29629e12a4d1ede9f1704dc958cdb8530a791efd"}, - {file = "langchain_core-0.1.4.tar.gz", hash = "sha256:f700138689c9014e23d3c29796a892dccf7f2a42901cb8817671823e1a24724c"}, + {file = "langchain_core-0.1.10-py3-none-any.whl", hash = "sha256:d89952f6d0766cfc88d9f1e25b84d56f8d7bd63a45ad8ec1a9a038c9b49df16d"}, + {file = "langchain_core-0.1.10.tar.gz", hash = "sha256:3c9e1383264c102fcc6f865700dbb9416c4931a25d0ac2195f6311c6b867aa17"}, ] [package.dependencies] @@ -1318,15 +1318,32 @@ tenacity = ">=8.1.0,<9.0.0" [package.extras] extended-testing = ["jinja2 (>=3,<4)"] +[[package]] +name = "langchain-openai" +version = "0.0.2" +description = "An integration package connecting OpenAI and LangChain" +optional = false +python-versions = ">=3.8.1,<4.0" +files = [ + {file = "langchain_openai-0.0.2-py3-none-any.whl", hash = "sha256:0a46067be13ce95a029fdca339cd1034a61be1a727786178fbad702668a060f9"}, + {file = "langchain_openai-0.0.2.tar.gz", hash = "sha256:713af4a638f65b3af2f741a9d61991011c31939b070d81ede5b2e3cba625e01a"}, +] + +[package.dependencies] +langchain-core = ">=0.1.7,<0.2" +numpy = ">=1,<2" +openai = ">=1.6.1,<2.0.0" +tiktoken = ">=0.5.2,<0.6.0" + [[package]] name = "langsmith" -version = "0.0.75" +version = "0.0.79" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
optional = false python-versions = ">=3.8.1,<4.0" files = [ - {file = "langsmith-0.0.75-py3-none-any.whl", hash = "sha256:3e008854204c5eaae007f34c7e249059218605689c385c037f6a40cac044833b"}, - {file = "langsmith-0.0.75.tar.gz", hash = "sha256:3fd44c58bd53cb9366af3de129c7f11b6947914f1bb598a585240df0e2c566eb"}, + {file = "langsmith-0.0.79-py3-none-any.whl", hash = "sha256:be0374e913c36d9f6a13dd6b6e20a506066d5a0f3abfd476f9cf9e0b086ed744"}, + {file = "langsmith-0.0.79.tar.gz", hash = "sha256:d32639ccd18a92533b302f6f482255619afc8eb007fff91e37ee699d947c5e29"}, ] [package.dependencies] @@ -1335,13 +1352,13 @@ requests = ">=2,<3" [[package]] name = "llama-index" -version = "0.9.24" +version = "0.9.29" description = "Interface between LLMs and your data" optional = false python-versions = ">=3.8.1,<4.0" files = [ - {file = "llama_index-0.9.24-py3-none-any.whl", hash = "sha256:aeef8a4fb478d45474261289046f37c2805e3bf3453c156c84088c0414465e5e"}, - {file = "llama_index-0.9.24.tar.gz", hash = "sha256:48175a35c30427f361068693d6f384baf76865831569ca4e04a1a8b6f10ba269"}, + {file = "llama_index-0.9.29-py3-none-any.whl", hash = "sha256:4ac6f589a2e8c5049882c185f25e6c57b68cf9d6e46a6f84102705a7dd357dd2"}, + {file = "llama_index-0.9.29.tar.gz", hash = "sha256:df5af84bf593cdf6a36da403f521bd5582eccbd6ca551da44d2e4e5b2b4f5619"}, ] [package.dependencies] @@ -1352,6 +1369,7 @@ deprecated = ">=1.2.9.3" fsspec = ">=2023.5.0" httpx = "*" nest-asyncio = ">=1.5.8,<2.0.0" +networkx = ">=3.0" nltk = ">=3.8.1,<4.0.0" numpy = "*" openai = ">=1.1.0" @@ -1366,8 +1384,8 @@ typing-inspect = ">=0.8.0" [package.extras] gradientai = ["gradientai (>=1.4.0)"] langchain = ["langchain (>=0.0.303)"] -local-models = ["optimum[onnxruntime] (>=1.13.2,<2.0.0)", "sentencepiece (>=0.1.99,<0.2.0)", "transformers[torch] (>=4.34.0,<5.0.0)"] -postgres = ["asyncpg (>=0.28.0,<0.29.0)", "pgvector (>=0.1.0,<0.2.0)", "psycopg-binary (>=3.1.12,<4.0.0)"] +local-models = ["optimum[onnxruntime] (>=1.13.2,<2.0.0)", "sentencepiece (>=0.1.99,<0.2.0)", "transformers[torch] (>=4.33.1,<5.0.0)"] +postgres = ["asyncpg (>=0.28.0,<0.29.0)", "pgvector (>=0.1.0,<0.2.0)", "psycopg-binary (>=3.1.12,<4.0.0)", "psycopg2 (>=2.9.9,<3.0.0)"] query-tools = ["guidance (>=0.0.64,<0.0.65)", "jsonpath-ng (>=1.6.0,<2.0.0)", "lm-format-enforcer (>=0.4.3,<0.5.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "scikit-learn", "spacy (>=3.7.1,<4.0.0)"] [[package]] @@ -1465,22 +1483,22 @@ files = [ [[package]] name = "marshmallow" -version = "3.20.1" +version = "3.20.2" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
optional = false python-versions = ">=3.8" files = [ - {file = "marshmallow-3.20.1-py3-none-any.whl", hash = "sha256:684939db93e80ad3561392f47be0230743131560a41c5110684c16e21ade0a5c"}, - {file = "marshmallow-3.20.1.tar.gz", hash = "sha256:5d2371bbe42000f2b3fb5eaa065224df7d8f8597bc19a1bbfa5bfe7fba8da889"}, + {file = "marshmallow-3.20.2-py3-none-any.whl", hash = "sha256:c21d4b98fee747c130e6bc8f45c4b3199ea66bc00c12ee1f639f0aeca034d5e9"}, + {file = "marshmallow-3.20.2.tar.gz", hash = "sha256:4c1daff273513dc5eb24b219a8035559dc573c8f322558ef85f5438ddd1236dd"}, ] [package.dependencies] packaging = ">=17.0" [package.extras] -dev = ["flake8 (==6.0.0)", "flake8-bugbear (==23.7.10)", "mypy (==1.4.1)", "pre-commit (>=2.4,<4.0)", "pytest", "pytz", "simplejson", "tox"] -docs = ["alabaster (==0.7.13)", "autodocsumm (==0.2.11)", "sphinx (==7.0.1)", "sphinx-issues (==3.0.1)", "sphinx-version-warning (==1.1.2)"] -lint = ["flake8 (==6.0.0)", "flake8-bugbear (==23.7.10)", "mypy (==1.4.1)", "pre-commit (>=2.4,<4.0)"] +dev = ["pre-commit (>=2.4,<4.0)", "pytest", "pytz", "simplejson", "tox"] +docs = ["alabaster (==0.7.15)", "autodocsumm (==0.2.12)", "sphinx (==7.2.6)", "sphinx-issues (==3.0.1)", "sphinx-version-warning (==1.1.2)"] +lint = ["pre-commit (>=2.4,<4.0)"] tests = ["pytest", "pytz", "simplejson"] [[package]] @@ -1737,47 +1755,47 @@ twitter = ["twython"] [[package]] name = "numpy" -version = "1.26.2" +version = "1.26.3" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" files = [ - {file = "numpy-1.26.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3703fc9258a4a122d17043e57b35e5ef1c5a5837c3db8be396c82e04c1cf9b0f"}, - {file = "numpy-1.26.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cc392fdcbd21d4be6ae1bb4475a03ce3b025cd49a9be5345d76d7585aea69440"}, - {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36340109af8da8805d8851ef1d74761b3b88e81a9bd80b290bbfed61bd2b4f75"}, - {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcc008217145b3d77abd3e4d5ef586e3bdfba8fe17940769f8aa09b99e856c00"}, - {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3ced40d4e9e18242f70dd02d739e44698df3dcb010d31f495ff00a31ef6014fe"}, - {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b272d4cecc32c9e19911891446b72e986157e6a1809b7b56518b4f3755267523"}, - {file = "numpy-1.26.2-cp310-cp310-win32.whl", hash = "sha256:22f8fc02fdbc829e7a8c578dd8d2e15a9074b630d4da29cda483337e300e3ee9"}, - {file = "numpy-1.26.2-cp310-cp310-win_amd64.whl", hash = "sha256:26c9d33f8e8b846d5a65dd068c14e04018d05533b348d9eaeef6c1bd787f9919"}, - {file = "numpy-1.26.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b96e7b9c624ef3ae2ae0e04fa9b460f6b9f17ad8b4bec6d7756510f1f6c0c841"}, - {file = "numpy-1.26.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:aa18428111fb9a591d7a9cc1b48150097ba6a7e8299fb56bdf574df650e7d1f1"}, - {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06fa1ed84aa60ea6ef9f91ba57b5ed963c3729534e6e54055fc151fad0423f0a"}, - {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96ca5482c3dbdd051bcd1fce8034603d6ebfc125a7bd59f55b40d8f5d246832b"}, - {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:854ab91a2906ef29dc3925a064fcd365c7b4da743f84b123002f6139bcb3f8a7"}, - {file = 
"numpy-1.26.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f43740ab089277d403aa07567be138fc2a89d4d9892d113b76153e0e412409f8"}, - {file = "numpy-1.26.2-cp311-cp311-win32.whl", hash = "sha256:a2bbc29fcb1771cd7b7425f98b05307776a6baf43035d3b80c4b0f29e9545186"}, - {file = "numpy-1.26.2-cp311-cp311-win_amd64.whl", hash = "sha256:2b3fca8a5b00184828d12b073af4d0fc5fdd94b1632c2477526f6bd7842d700d"}, - {file = "numpy-1.26.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a4cd6ed4a339c21f1d1b0fdf13426cb3b284555c27ac2f156dfdaaa7e16bfab0"}, - {file = "numpy-1.26.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5d5244aabd6ed7f312268b9247be47343a654ebea52a60f002dc70c769048e75"}, - {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a3cdb4d9c70e6b8c0814239ead47da00934666f668426fc6e94cce869e13fd7"}, - {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa317b2325f7aa0a9471663e6093c210cb2ae9c0ad824732b307d2c51983d5b6"}, - {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:174a8880739c16c925799c018f3f55b8130c1f7c8e75ab0a6fa9d41cab092fd6"}, - {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f79b231bf5c16b1f39c7f4875e1ded36abee1591e98742b05d8a0fb55d8a3eec"}, - {file = "numpy-1.26.2-cp312-cp312-win32.whl", hash = "sha256:4a06263321dfd3598cacb252f51e521a8cb4b6df471bb12a7ee5cbab20ea9167"}, - {file = "numpy-1.26.2-cp312-cp312-win_amd64.whl", hash = "sha256:b04f5dc6b3efdaab541f7857351aac359e6ae3c126e2edb376929bd3b7f92d7e"}, - {file = "numpy-1.26.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4eb8df4bf8d3d90d091e0146f6c28492b0be84da3e409ebef54349f71ed271ef"}, - {file = "numpy-1.26.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1a13860fdcd95de7cf58bd6f8bc5a5ef81c0b0625eb2c9a783948847abbef2c2"}, - {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64308ebc366a8ed63fd0bf426b6a9468060962f1a4339ab1074c228fa6ade8e3"}, - {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baf8aab04a2c0e859da118f0b38617e5ee65d75b83795055fb66c0d5e9e9b818"}, - {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d73a3abcac238250091b11caef9ad12413dab01669511779bc9b29261dd50210"}, - {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b361d369fc7e5e1714cf827b731ca32bff8d411212fccd29ad98ad622449cc36"}, - {file = "numpy-1.26.2-cp39-cp39-win32.whl", hash = "sha256:bd3f0091e845164a20bd5a326860c840fe2af79fa12e0469a12768a3ec578d80"}, - {file = "numpy-1.26.2-cp39-cp39-win_amd64.whl", hash = "sha256:2beef57fb031dcc0dc8fa4fe297a742027b954949cabb52a2a376c144e5e6060"}, - {file = "numpy-1.26.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1cc3d5029a30fb5f06704ad6b23b35e11309491c999838c31f124fee32107c79"}, - {file = "numpy-1.26.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94cc3c222bb9fb5a12e334d0479b97bb2df446fbe622b470928f5284ffca3f8d"}, - {file = "numpy-1.26.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fe6b44fb8fcdf7eda4ef4461b97b3f63c466b27ab151bec2366db8b197387841"}, - {file = "numpy-1.26.2.tar.gz", hash = "sha256:f65738447676ab5777f11e6bbbdb8ce11b785e105f690bc45966574816b6d3ea"}, + {file = "numpy-1.26.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:806dd64230dbbfaca8a27faa64e2f414bf1c6622ab78cc4264f7f5f028fee3bf"}, + {file = "numpy-1.26.3-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:02f98011ba4ab17f46f80f7f8f1c291ee7d855fcef0a5a98db80767a468c85cd"}, + {file = "numpy-1.26.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d45b3ec2faed4baca41c76617fcdcfa4f684ff7a151ce6fc78ad3b6e85af0a6"}, + {file = "numpy-1.26.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdd2b45bf079d9ad90377048e2747a0c82351989a2165821f0c96831b4a2a54b"}, + {file = "numpy-1.26.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:211ddd1e94817ed2d175b60b6374120244a4dd2287f4ece45d49228b4d529178"}, + {file = "numpy-1.26.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b1240f767f69d7c4c8a29adde2310b871153df9b26b5cb2b54a561ac85146485"}, + {file = "numpy-1.26.3-cp310-cp310-win32.whl", hash = "sha256:21a9484e75ad018974a2fdaa216524d64ed4212e418e0a551a2d83403b0531d3"}, + {file = "numpy-1.26.3-cp310-cp310-win_amd64.whl", hash = "sha256:9e1591f6ae98bcfac2a4bbf9221c0b92ab49762228f38287f6eeb5f3f55905ce"}, + {file = "numpy-1.26.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b831295e5472954104ecb46cd98c08b98b49c69fdb7040483aff799a755a7374"}, + {file = "numpy-1.26.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9e87562b91f68dd8b1c39149d0323b42e0082db7ddb8e934ab4c292094d575d6"}, + {file = "numpy-1.26.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c66d6fec467e8c0f975818c1796d25c53521124b7cfb760114be0abad53a0a2"}, + {file = "numpy-1.26.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f25e2811a9c932e43943a2615e65fc487a0b6b49218899e62e426e7f0a57eeda"}, + {file = "numpy-1.26.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:af36e0aa45e25c9f57bf684b1175e59ea05d9a7d3e8e87b7ae1a1da246f2767e"}, + {file = "numpy-1.26.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:51c7f1b344f302067b02e0f5b5d2daa9ed4a721cf49f070280ac202738ea7f00"}, + {file = "numpy-1.26.3-cp311-cp311-win32.whl", hash = "sha256:7ca4f24341df071877849eb2034948459ce3a07915c2734f1abb4018d9c49d7b"}, + {file = "numpy-1.26.3-cp311-cp311-win_amd64.whl", hash = "sha256:39763aee6dfdd4878032361b30b2b12593fb445ddb66bbac802e2113eb8a6ac4"}, + {file = "numpy-1.26.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a7081fd19a6d573e1a05e600c82a1c421011db7935ed0d5c483e9dd96b99cf13"}, + {file = "numpy-1.26.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12c70ac274b32bc00c7f61b515126c9205323703abb99cd41836e8125ea0043e"}, + {file = "numpy-1.26.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f784e13e598e9594750b2ef6729bcd5a47f6cfe4a12cca13def35e06d8163e3"}, + {file = "numpy-1.26.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f24750ef94d56ce6e33e4019a8a4d68cfdb1ef661a52cdaee628a56d2437419"}, + {file = "numpy-1.26.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:77810ef29e0fb1d289d225cabb9ee6cf4d11978a00bb99f7f8ec2132a84e0166"}, + {file = "numpy-1.26.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8ed07a90f5450d99dad60d3799f9c03c6566709bd53b497eb9ccad9a55867f36"}, + {file = "numpy-1.26.3-cp312-cp312-win32.whl", hash = "sha256:f73497e8c38295aaa4741bdfa4fda1a5aedda5473074369eca10626835445511"}, + {file = "numpy-1.26.3-cp312-cp312-win_amd64.whl", hash = "sha256:da4b0c6c699a0ad73c810736303f7fbae483bcb012e38d7eb06a5e3b432c981b"}, + {file = "numpy-1.26.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1666f634cb3c80ccbd77ec97bc17337718f56d6658acf5d3b906ca03e90ce87f"}, + {file = "numpy-1.26.3-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:18c3319a7d39b2c6a9e3bb75aab2304ab79a811ac0168a671a62e6346c29b03f"}, + {file = "numpy-1.26.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b7e807d6888da0db6e7e75838444d62495e2b588b99e90dd80c3459594e857b"}, + {file = "numpy-1.26.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4d362e17bcb0011738c2d83e0a65ea8ce627057b2fdda37678f4374a382a137"}, + {file = "numpy-1.26.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b8c275f0ae90069496068c714387b4a0eba5d531aace269559ff2b43655edd58"}, + {file = "numpy-1.26.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cc0743f0302b94f397a4a65a660d4cd24267439eb16493fb3caad2e4389bccbb"}, + {file = "numpy-1.26.3-cp39-cp39-win32.whl", hash = "sha256:9bc6d1a7f8cedd519c4b7b1156d98e051b726bf160715b769106661d567b3f03"}, + {file = "numpy-1.26.3-cp39-cp39-win_amd64.whl", hash = "sha256:867e3644e208c8922a3be26fc6bbf112a035f50f0a86497f98f228c50c607bb2"}, + {file = "numpy-1.26.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3c67423b3703f8fbd90f5adaa37f85b5794d3366948efe9a5190a5f3a83fc34e"}, + {file = "numpy-1.26.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46f47ee566d98849323f01b349d58f2557f02167ee301e5e28809a8c0e27a2d0"}, + {file = "numpy-1.26.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a8474703bffc65ca15853d5fd4d06b18138ae90c17c8d12169968e998e448bb5"}, + {file = "numpy-1.26.3.tar.gz", hash = "sha256:697df43e2b6310ecc9d95f05d5ef20eacc09c7c4ecc9da3f235d39e71b7da1e4"}, ] [[package]] @@ -1923,13 +1941,13 @@ files = [ [[package]] name = "openai" -version = "1.6.1" +version = "1.7.1" description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.6.1-py3-none-any.whl", hash = "sha256:bc9f774838d67ac29fb24cdeb2d58faf57de8b311085dcd1348f7aa02a96c7ee"}, - {file = "openai-1.6.1.tar.gz", hash = "sha256:d553ca9dbf9486b08e75b09e8671e4f638462aaadccfced632bf490fc3d75fa2"}, + {file = "openai-1.7.1-py3-none-any.whl", hash = "sha256:e52ad7ea015331edc584e6e9c98741c819d7ffbbd2ecc50bf1f55c33f9cb3f77"}, + {file = "openai-1.7.1.tar.gz", hash = "sha256:7556e6aa30e20254b1ad68de49bb5ef4d8106bfac5e8a78abdc1daa911fbb1fb"}, ] [package.dependencies] @@ -2035,70 +2053,88 @@ files = [ [[package]] name = "pillow" -version = "10.1.0" +version = "10.2.0" description = "Python Imaging Library (Fork)" optional = false python-versions = ">=3.8" files = [ - {file = "Pillow-10.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1ab05f3db77e98f93964697c8efc49c7954b08dd61cff526b7f2531a22410106"}, - {file = "Pillow-10.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6932a7652464746fcb484f7fc3618e6503d2066d853f68a4bd97193a3996e273"}, - {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f63b5a68daedc54c7c3464508d8c12075e56dcfbd42f8c1bf40169061ae666"}, - {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0949b55eb607898e28eaccb525ab104b2d86542a85c74baf3a6dc24002edec2"}, - {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ae88931f93214777c7a3aa0a8f92a683f83ecde27f65a45f95f22d289a69e593"}, - {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b0eb01ca85b2361b09480784a7931fc648ed8b7836f01fb9241141b968feb1db"}, - {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = 
"sha256:d27b5997bdd2eb9fb199982bb7eb6164db0426904020dc38c10203187ae2ff2f"}, - {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7df5608bc38bd37ef585ae9c38c9cd46d7c81498f086915b0f97255ea60c2818"}, - {file = "Pillow-10.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:41f67248d92a5e0a2076d3517d8d4b1e41a97e2df10eb8f93106c89107f38b57"}, - {file = "Pillow-10.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1fb29c07478e6c06a46b867e43b0bcdb241b44cc52be9bc25ce5944eed4648e7"}, - {file = "Pillow-10.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2cdc65a46e74514ce742c2013cd4a2d12e8553e3a2563c64879f7c7e4d28bce7"}, - {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50d08cd0a2ecd2a8657bd3d82c71efd5a58edb04d9308185d66c3a5a5bed9610"}, - {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:062a1610e3bc258bff2328ec43f34244fcec972ee0717200cb1425214fe5b839"}, - {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:61f1a9d247317fa08a308daaa8ee7b3f760ab1809ca2da14ecc88ae4257d6172"}, - {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a646e48de237d860c36e0db37ecaecaa3619e6f3e9d5319e527ccbc8151df061"}, - {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:47e5bf85b80abc03be7455c95b6d6e4896a62f6541c1f2ce77a7d2bb832af262"}, - {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a92386125e9ee90381c3369f57a2a50fa9e6aa8b1cf1d9c4b200d41a7dd8e992"}, - {file = "Pillow-10.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:0f7c276c05a9767e877a0b4c5050c8bee6a6d960d7f0c11ebda6b99746068c2a"}, - {file = "Pillow-10.1.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:a89b8312d51715b510a4fe9fc13686283f376cfd5abca8cd1c65e4c76e21081b"}, - {file = "Pillow-10.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:00f438bb841382b15d7deb9a05cc946ee0f2c352653c7aa659e75e592f6fa17d"}, - {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d929a19f5469b3f4df33a3df2983db070ebb2088a1e145e18facbc28cae5b27"}, - {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a92109192b360634a4489c0c756364c0c3a2992906752165ecb50544c251312"}, - {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:0248f86b3ea061e67817c47ecbe82c23f9dd5d5226200eb9090b3873d3ca32de"}, - {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9882a7451c680c12f232a422730f986a1fcd808da0fd428f08b671237237d651"}, - {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1c3ac5423c8c1da5928aa12c6e258921956757d976405e9467c5f39d1d577a4b"}, - {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:806abdd8249ba3953c33742506fe414880bad78ac25cc9a9b1c6ae97bedd573f"}, - {file = "Pillow-10.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:eaed6977fa73408b7b8a24e8b14e59e1668cfc0f4c40193ea7ced8e210adf996"}, - {file = "Pillow-10.1.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:fe1e26e1ffc38be097f0ba1d0d07fcade2bcfd1d023cda5b29935ae8052bd793"}, - {file = "Pillow-10.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7a7e3daa202beb61821c06d2517428e8e7c1aab08943e92ec9e5755c2fc9ba5e"}, - {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24fadc71218ad2b8ffe437b54876c9382b4a29e030a05a9879f615091f42ffc2"}, - {file = 
"Pillow-10.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa1d323703cfdac2036af05191b969b910d8f115cf53093125e4058f62012c9a"}, - {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:912e3812a1dbbc834da2b32299b124b5ddcb664ed354916fd1ed6f193f0e2d01"}, - {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7dbaa3c7de82ef37e7708521be41db5565004258ca76945ad74a8e998c30af8d"}, - {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9d7bc666bd8c5a4225e7ac71f2f9d12466ec555e89092728ea0f5c0c2422ea80"}, - {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:baada14941c83079bf84c037e2d8b7506ce201e92e3d2fa0d1303507a8538212"}, - {file = "Pillow-10.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:2ef6721c97894a7aa77723740a09547197533146fba8355e86d6d9a4a1056b14"}, - {file = "Pillow-10.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0a026c188be3b443916179f5d04548092e253beb0c3e2ee0a4e2cdad72f66099"}, - {file = "Pillow-10.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:04f6f6149f266a100374ca3cc368b67fb27c4af9f1cc8cb6306d849dcdf12616"}, - {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb40c011447712d2e19cc261c82655f75f32cb724788df315ed992a4d65696bb"}, - {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a8413794b4ad9719346cd9306118450b7b00d9a15846451549314a58ac42219"}, - {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c9aeea7b63edb7884b031a35305629a7593272b54f429a9869a4f63a1bf04c34"}, - {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b4005fee46ed9be0b8fb42be0c20e79411533d1fd58edabebc0dd24626882cfd"}, - {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d0152565c6aa6ebbfb1e5d8624140a440f2b99bf7afaafbdbf6430426497f28"}, - {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d921bc90b1defa55c9917ca6b6b71430e4286fc9e44c55ead78ca1a9f9eba5f2"}, - {file = "Pillow-10.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfe96560c6ce2f4c07d6647af2d0f3c54cc33289894ebd88cfbb3bcd5391e256"}, - {file = "Pillow-10.1.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:937bdc5a7f5343d1c97dc98149a0be7eb9704e937fe3dc7140e229ae4fc572a7"}, - {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1c25762197144e211efb5f4e8ad656f36c8d214d390585d1d21281f46d556ba"}, - {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:afc8eef765d948543a4775f00b7b8c079b3321d6b675dde0d02afa2ee23000b4"}, - {file = "Pillow-10.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:883f216eac8712b83a63f41b76ddfb7b2afab1b74abbb413c5df6680f071a6b9"}, - {file = "Pillow-10.1.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b920e4d028f6442bea9a75b7491c063f0b9a3972520731ed26c83e254302eb1e"}, - {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c41d960babf951e01a49c9746f92c5a7e0d939d1652d7ba30f6b3090f27e412"}, - {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1fafabe50a6977ac70dfe829b2d5735fd54e190ab55259ec8aea4aaea412fa0b"}, - {file = "Pillow-10.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3b834f4b16173e5b92ab6566f0473bfb09f939ba14b23b8da1f54fa63e4b623f"}, - {file = "Pillow-10.1.0.tar.gz", hash = 
"sha256:e6bf8de6c36ed96c86ea3b6e1d5273c53f46ef518a062464cd7ef5dd2cf92e38"}, + {file = "pillow-10.2.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:7823bdd049099efa16e4246bdf15e5a13dbb18a51b68fa06d6c1d4d8b99a796e"}, + {file = "pillow-10.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:83b2021f2ade7d1ed556bc50a399127d7fb245e725aa0113ebd05cfe88aaf588"}, + {file = "pillow-10.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fad5ff2f13d69b7e74ce5b4ecd12cc0ec530fcee76356cac6742785ff71c452"}, + {file = "pillow-10.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da2b52b37dad6d9ec64e653637a096905b258d2fc2b984c41ae7d08b938a67e4"}, + {file = "pillow-10.2.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:47c0995fc4e7f79b5cfcab1fc437ff2890b770440f7696a3ba065ee0fd496563"}, + {file = "pillow-10.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:322bdf3c9b556e9ffb18f93462e5f749d3444ce081290352c6070d014c93feb2"}, + {file = "pillow-10.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:51f1a1bffc50e2e9492e87d8e09a17c5eea8409cda8d3f277eb6edc82813c17c"}, + {file = "pillow-10.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:69ffdd6120a4737710a9eee73e1d2e37db89b620f702754b8f6e62594471dee0"}, + {file = "pillow-10.2.0-cp310-cp310-win32.whl", hash = "sha256:c6dafac9e0f2b3c78df97e79af707cdc5ef8e88208d686a4847bab8266870023"}, + {file = "pillow-10.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:aebb6044806f2e16ecc07b2a2637ee1ef67a11840a66752751714a0d924adf72"}, + {file = "pillow-10.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:7049e301399273a0136ff39b84c3678e314f2158f50f517bc50285fb5ec847ad"}, + {file = "pillow-10.2.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:35bb52c37f256f662abdfa49d2dfa6ce5d93281d323a9af377a120e89a9eafb5"}, + {file = "pillow-10.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c23f307202661071d94b5e384e1e1dc7dfb972a28a2310e4ee16103e66ddb67"}, + {file = "pillow-10.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:773efe0603db30c281521a7c0214cad7836c03b8ccff897beae9b47c0b657d61"}, + {file = "pillow-10.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11fa2e5984b949b0dd6d7a94d967743d87c577ff0b83392f17cb3990d0d2fd6e"}, + {file = "pillow-10.2.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:716d30ed977be8b37d3ef185fecb9e5a1d62d110dfbdcd1e2a122ab46fddb03f"}, + {file = "pillow-10.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a086c2af425c5f62a65e12fbf385f7c9fcb8f107d0849dba5839461a129cf311"}, + {file = "pillow-10.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c8de2789052ed501dd829e9cae8d3dcce7acb4777ea4a479c14521c942d395b1"}, + {file = "pillow-10.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:609448742444d9290fd687940ac0b57fb35e6fd92bdb65386e08e99af60bf757"}, + {file = "pillow-10.2.0-cp311-cp311-win32.whl", hash = "sha256:823ef7a27cf86df6597fa0671066c1b596f69eba53efa3d1e1cb8b30f3533068"}, + {file = "pillow-10.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:1da3b2703afd040cf65ec97efea81cfba59cdbed9c11d8efc5ab09df9509fc56"}, + {file = "pillow-10.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:edca80cbfb2b68d7b56930b84a0e45ae1694aeba0541f798e908a49d66b837f1"}, + {file = "pillow-10.2.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:1b5e1b74d1bd1b78bc3477528919414874748dd363e6272efd5abf7654e68bef"}, + {file = "pillow-10.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:0eae2073305f451d8ecacb5474997c08569fb4eb4ac231ffa4ad7d342fdc25ac"}, + {file = "pillow-10.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7c2286c23cd350b80d2fc9d424fc797575fb16f854b831d16fd47ceec078f2c"}, + {file = "pillow-10.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e23412b5c41e58cec602f1135c57dfcf15482013ce6e5f093a86db69646a5aa"}, + {file = "pillow-10.2.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:52a50aa3fb3acb9cf7213573ef55d31d6eca37f5709c69e6858fe3bc04a5c2a2"}, + {file = "pillow-10.2.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:127cee571038f252a552760076407f9cff79761c3d436a12af6000cd182a9d04"}, + {file = "pillow-10.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:8d12251f02d69d8310b046e82572ed486685c38f02176bd08baf216746eb947f"}, + {file = "pillow-10.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:54f1852cd531aa981bc0965b7d609f5f6cc8ce8c41b1139f6ed6b3c54ab82bfb"}, + {file = "pillow-10.2.0-cp312-cp312-win32.whl", hash = "sha256:257d8788df5ca62c980314053197f4d46eefedf4e6175bc9412f14412ec4ea2f"}, + {file = "pillow-10.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:154e939c5f0053a383de4fd3d3da48d9427a7e985f58af8e94d0b3c9fcfcf4f9"}, + {file = "pillow-10.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:f379abd2f1e3dddb2b61bc67977a6b5a0a3f7485538bcc6f39ec76163891ee48"}, + {file = "pillow-10.2.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8373c6c251f7ef8bda6675dd6d2b3a0fcc31edf1201266b5cf608b62a37407f9"}, + {file = "pillow-10.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:870ea1ada0899fd0b79643990809323b389d4d1d46c192f97342eeb6ee0b8483"}, + {file = "pillow-10.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4b6b1e20608493548b1f32bce8cca185bf0480983890403d3b8753e44077129"}, + {file = "pillow-10.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3031709084b6e7852d00479fd1d310b07d0ba82765f973b543c8af5061cf990e"}, + {file = "pillow-10.2.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:3ff074fc97dd4e80543a3e91f69d58889baf2002b6be64347ea8cf5533188213"}, + {file = "pillow-10.2.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:cb4c38abeef13c61d6916f264d4845fab99d7b711be96c326b84df9e3e0ff62d"}, + {file = "pillow-10.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b1b3020d90c2d8e1dae29cf3ce54f8094f7938460fb5ce8bc5c01450b01fbaf6"}, + {file = "pillow-10.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:170aeb00224ab3dc54230c797f8404507240dd868cf52066f66a41b33169bdbe"}, + {file = "pillow-10.2.0-cp38-cp38-win32.whl", hash = "sha256:c4225f5220f46b2fde568c74fca27ae9771536c2e29d7c04f4fb62c83275ac4e"}, + {file = "pillow-10.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:0689b5a8c5288bc0504d9fcee48f61a6a586b9b98514d7d29b840143d6734f39"}, + {file = "pillow-10.2.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:b792a349405fbc0163190fde0dc7b3fef3c9268292586cf5645598b48e63dc67"}, + {file = "pillow-10.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c570f24be1e468e3f0ce7ef56a89a60f0e05b30a3669a459e419c6eac2c35364"}, + {file = "pillow-10.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8ecd059fdaf60c1963c58ceb8997b32e9dc1b911f5da5307aab614f1ce5c2fb"}, + {file = "pillow-10.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c365fd1703040de1ec284b176d6af5abe21b427cb3a5ff68e0759e1e313a5e7e"}, + {file = 
"pillow-10.2.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:70c61d4c475835a19b3a5aa42492409878bbca7438554a1f89d20d58a7c75c01"}, + {file = "pillow-10.2.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b6f491cdf80ae540738859d9766783e3b3c8e5bd37f5dfa0b76abdecc5081f13"}, + {file = "pillow-10.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d189550615b4948f45252d7f005e53c2040cea1af5b60d6f79491a6e147eef7"}, + {file = "pillow-10.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:49d9ba1ed0ef3e061088cd1e7538a0759aab559e2e0a80a36f9fd9d8c0c21591"}, + {file = "pillow-10.2.0-cp39-cp39-win32.whl", hash = "sha256:babf5acfede515f176833ed6028754cbcd0d206f7f614ea3447d67c33be12516"}, + {file = "pillow-10.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:0304004f8067386b477d20a518b50f3fa658a28d44e4116970abfcd94fac34a8"}, + {file = "pillow-10.2.0-cp39-cp39-win_arm64.whl", hash = "sha256:0fb3e7fc88a14eacd303e90481ad983fd5b69c761e9e6ef94c983f91025da869"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:322209c642aabdd6207517e9739c704dc9f9db943015535783239022002f054a"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3eedd52442c0a5ff4f887fab0c1c0bb164d8635b32c894bc1faf4c618dd89df2"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb28c753fd5eb3dd859b4ee95de66cc62af91bcff5db5f2571d32a520baf1f04"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:33870dc4653c5017bf4c8873e5488d8f8d5f8935e2f1fb9a2208c47cdd66efd2"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3c31822339516fb3c82d03f30e22b1d038da87ef27b6a78c9549888f8ceda39a"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a2b56ba36e05f973d450582fb015594aaa78834fefe8dfb8fcd79b93e64ba4c6"}, + {file = "pillow-10.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d8e6aeb9201e655354b3ad049cb77d19813ad4ece0df1249d3c793de3774f8c7"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:2247178effb34a77c11c0e8ac355c7a741ceca0a732b27bf11e747bbc950722f"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15587643b9e5eb26c48e49a7b33659790d28f190fc514a322d55da2fb5c2950e"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753cd8f2086b2b80180d9b3010dd4ed147efc167c90d3bf593fe2af21265e5a5"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7c8f97e8e7a9009bcacbe3766a36175056c12f9a44e6e6f2d5caad06dcfbf03b"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d1b35bcd6c5543b9cb547dee3150c93008f8dd0f1fef78fc0cd2b141c5baf58a"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fe4c15f6c9285dc54ce6553a3ce908ed37c8f3825b5a51a15c91442bb955b868"}, + {file = "pillow-10.2.0.tar.gz", hash = "sha256:e87f0b2c78157e12d7686b27d63c070fd65d994e8ddae6f328e0dcf4a0cd007e"}, ] [package.extras] docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] +typing = ["typing-extensions"] +xmp = ["defusedxml"] [[package]] name = 
"platformdirs" @@ -2151,22 +2187,22 @@ tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "p [[package]] name = "protobuf" -version = "4.25.1" +version = "4.25.2" description = "" optional = false python-versions = ">=3.8" files = [ - {file = "protobuf-4.25.1-cp310-abi3-win32.whl", hash = "sha256:193f50a6ab78a970c9b4f148e7c750cfde64f59815e86f686c22e26b4fe01ce7"}, - {file = "protobuf-4.25.1-cp310-abi3-win_amd64.whl", hash = "sha256:3497c1af9f2526962f09329fd61a36566305e6c72da2590ae0d7d1322818843b"}, - {file = "protobuf-4.25.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:0bf384e75b92c42830c0a679b0cd4d6e2b36ae0cf3dbb1e1dfdda48a244f4bcd"}, - {file = "protobuf-4.25.1-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:0f881b589ff449bf0b931a711926e9ddaad3b35089cc039ce1af50b21a4ae8cb"}, - {file = "protobuf-4.25.1-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:ca37bf6a6d0046272c152eea90d2e4ef34593aaa32e8873fc14c16440f22d4b7"}, - {file = "protobuf-4.25.1-cp38-cp38-win32.whl", hash = "sha256:abc0525ae2689a8000837729eef7883b9391cd6aa7950249dcf5a4ede230d5dd"}, - {file = "protobuf-4.25.1-cp38-cp38-win_amd64.whl", hash = "sha256:1484f9e692091450e7edf418c939e15bfc8fc68856e36ce399aed6889dae8bb0"}, - {file = "protobuf-4.25.1-cp39-cp39-win32.whl", hash = "sha256:8bdbeaddaac52d15c6dce38c71b03038ef7772b977847eb6d374fc86636fa510"}, - {file = "protobuf-4.25.1-cp39-cp39-win_amd64.whl", hash = "sha256:becc576b7e6b553d22cbdf418686ee4daa443d7217999125c045ad56322dda10"}, - {file = "protobuf-4.25.1-py3-none-any.whl", hash = "sha256:a19731d5e83ae4737bb2a089605e636077ac001d18781b3cf489b9546c7c80d6"}, - {file = "protobuf-4.25.1.tar.gz", hash = "sha256:57d65074b4f5baa4ab5da1605c02be90ac20c8b40fb137d6a8df9f416b0d0ce2"}, + {file = "protobuf-4.25.2-cp310-abi3-win32.whl", hash = "sha256:b50c949608682b12efb0b2717f53256f03636af5f60ac0c1d900df6213910fd6"}, + {file = "protobuf-4.25.2-cp310-abi3-win_amd64.whl", hash = "sha256:8f62574857ee1de9f770baf04dde4165e30b15ad97ba03ceac65f760ff018ac9"}, + {file = "protobuf-4.25.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:2db9f8fa64fbdcdc93767d3cf81e0f2aef176284071507e3ede160811502fd3d"}, + {file = "protobuf-4.25.2-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:10894a2885b7175d3984f2be8d9850712c57d5e7587a2410720af8be56cdaf62"}, + {file = "protobuf-4.25.2-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:fc381d1dd0516343f1440019cedf08a7405f791cd49eef4ae1ea06520bc1c020"}, + {file = "protobuf-4.25.2-cp38-cp38-win32.whl", hash = "sha256:33a1aeef4b1927431d1be780e87b641e322b88d654203a9e9d93f218ee359e61"}, + {file = "protobuf-4.25.2-cp38-cp38-win_amd64.whl", hash = "sha256:47f3de503fe7c1245f6f03bea7e8d3ec11c6c4a2ea9ef910e3221c8a15516d62"}, + {file = "protobuf-4.25.2-cp39-cp39-win32.whl", hash = "sha256:5e5c933b4c30a988b52e0b7c02641760a5ba046edc5e43d3b94a74c9fc57c1b3"}, + {file = "protobuf-4.25.2-cp39-cp39-win_amd64.whl", hash = "sha256:d66a769b8d687df9024f2985d5137a337f957a0916cf5464d1513eee96a63ff0"}, + {file = "protobuf-4.25.2-py3-none-any.whl", hash = "sha256:a8b7a98d4ce823303145bf3c1a8bdb0f2f4642a414b196f04ad9853ed0c8f830"}, + {file = "protobuf-4.25.2.tar.gz", hash = "sha256:fe599e175cb347efc8ee524bcd4b902d11f7262c0e569ececcb89995c15f0a5e"}, ] [[package]] @@ -3051,13 +3087,13 @@ files = [ [[package]] name = "sentry-sdk" -version = "1.39.1" +version = "1.39.2" description = "Python client for Sentry (https://sentry.io)" optional = false python-versions = "*" files = [ - {file = "sentry-sdk-1.39.1.tar.gz", hash = 
"sha256:320a55cdf9da9097a0bead239c35b7e61f53660ef9878861824fd6d9b2eaf3b5"}, - {file = "sentry_sdk-1.39.1-py2.py3-none-any.whl", hash = "sha256:81b5b9ffdd1a374e9eb0c053b5d2012155db9cbe76393a8585677b753bd5fdc1"}, + {file = "sentry-sdk-1.39.2.tar.gz", hash = "sha256:24c83b0b41c887d33328a9166f5950dc37ad58f01c9f2fbff6b87a6f1094170c"}, + {file = "sentry_sdk-1.39.2-py2.py3-none-any.whl", hash = "sha256:acaf597b30258fc7663063b291aa99e58f3096e91fe1e6634f4b79f9c1943e8e"}, ] [package.dependencies] @@ -3083,7 +3119,7 @@ huey = ["huey (>=2)"] loguru = ["loguru (>=0.5)"] opentelemetry = ["opentelemetry-distro (>=0.35b0)"] opentelemetry-experimental = ["opentelemetry-distro (>=0.40b0,<1.0)", "opentelemetry-instrumentation-aiohttp-client (>=0.40b0,<1.0)", "opentelemetry-instrumentation-django (>=0.40b0,<1.0)", "opentelemetry-instrumentation-fastapi (>=0.40b0,<1.0)", "opentelemetry-instrumentation-flask (>=0.40b0,<1.0)", "opentelemetry-instrumentation-requests (>=0.40b0,<1.0)", "opentelemetry-instrumentation-sqlite3 (>=0.40b0,<1.0)", "opentelemetry-instrumentation-urllib (>=0.40b0,<1.0)"] -pure-eval = ["asttokens", "executing", "pure-eval"] +pure-eval = ["asttokens", "executing", "pure_eval"] pymongo = ["pymongo (>=3.1)"] pyspark = ["pyspark (>=2.4.4)"] quart = ["blinker (>=1.1)", "quart (>=0.16.1)"] @@ -3145,65 +3181,65 @@ files = [ [[package]] name = "sqlalchemy" -version = "2.0.24" +version = "2.0.25" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.24-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5f801d85ba4753d4ed97181d003e5d3fa330ac7c4587d131f61d7f968f416862"}, - {file = "SQLAlchemy-2.0.24-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b35c35e3923ade1e7ac44e150dec29f5863513246c8bf85e2d7d313e3832bcfb"}, - {file = "SQLAlchemy-2.0.24-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d9b3fd5eca3c0b137a5e0e468e24ca544ed8ca4783e0e55341b7ed2807518ee"}, - {file = "SQLAlchemy-2.0.24-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a6209e689d0ff206c40032b6418e3cfcfc5af044b3f66e381d7f1ae301544b4"}, - {file = "SQLAlchemy-2.0.24-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:37e89d965b52e8b20571b5d44f26e2124b26ab63758bf1b7598a0e38fb2c4005"}, - {file = "SQLAlchemy-2.0.24-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c6910eb4ea90c0889f363965cd3c8c45a620ad27b526a7899f0054f6c1b9219e"}, - {file = "SQLAlchemy-2.0.24-cp310-cp310-win32.whl", hash = "sha256:d8e7e8a150e7b548e7ecd6ebb9211c37265991bf2504297d9454e01b58530fc6"}, - {file = "SQLAlchemy-2.0.24-cp310-cp310-win_amd64.whl", hash = "sha256:396f05c552f7fa30a129497c41bef5b4d1423f9af8fe4df0c3dcd38f3e3b9a14"}, - {file = "SQLAlchemy-2.0.24-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:adbd67dac4ebf54587198b63cd30c29fd7eafa8c0cab58893d9419414f8efe4b"}, - {file = "SQLAlchemy-2.0.24-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a0f611b431b84f55779cbb7157257d87b4a2876b067c77c4f36b15e44ced65e2"}, - {file = "SQLAlchemy-2.0.24-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56a0e90a959e18ac5f18c80d0cad9e90cb09322764f536e8a637426afb1cae2f"}, - {file = "SQLAlchemy-2.0.24-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6db686a1d9f183c639f7e06a2656af25d4ed438eda581de135d15569f16ace33"}, - {file = "SQLAlchemy-2.0.24-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f0cc0b486a56dff72dddae6b6bfa7ff201b0eeac29d4bc6f0e9725dc3c360d71"}, - {file = 
"SQLAlchemy-2.0.24-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4a1d4856861ba9e73bac05030cec5852eabfa9ef4af8e56c19d92de80d46fc34"}, - {file = "SQLAlchemy-2.0.24-cp311-cp311-win32.whl", hash = "sha256:a3c2753bf4f48b7a6024e5e8a394af49b1b12c817d75d06942cae03d14ff87b3"}, - {file = "SQLAlchemy-2.0.24-cp311-cp311-win_amd64.whl", hash = "sha256:38732884eabc64982a09a846bacf085596ff2371e4e41d20c0734f7e50525d01"}, - {file = "SQLAlchemy-2.0.24-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9f992e0f916201731993eab8502912878f02287d9f765ef843677ff118d0e0b1"}, - {file = "SQLAlchemy-2.0.24-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2587e108463cc2e5b45a896b2e7cc8659a517038026922a758bde009271aed11"}, - {file = "SQLAlchemy-2.0.24-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bb7cedcddffca98c40bb0becd3423e293d1fef442b869da40843d751785beb3"}, - {file = "SQLAlchemy-2.0.24-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83fa6df0e035689df89ff77a46bf8738696785d3156c2c61494acdcddc75c69d"}, - {file = "SQLAlchemy-2.0.24-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:cc889fda484d54d0b31feec409406267616536d048a450fc46943e152700bb79"}, - {file = "SQLAlchemy-2.0.24-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:57ef6f2cb8b09a042d0dbeaa46a30f2df5dd1e1eb889ba258b0d5d7d6011b81c"}, - {file = "SQLAlchemy-2.0.24-cp312-cp312-win32.whl", hash = "sha256:ea490564435b5b204d8154f0e18387b499ea3cedc1e6af3b3a2ab18291d85aa7"}, - {file = "SQLAlchemy-2.0.24-cp312-cp312-win_amd64.whl", hash = "sha256:ccfd336f96d4c9bbab0309f2a565bf15c468c2d8b2d277a32f89c5940f71fcf9"}, - {file = "SQLAlchemy-2.0.24-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:9aaaaa846b10dfbe1bda71079d0e31a7e2cebedda9409fa7dba3dfed1ae803e8"}, - {file = "SQLAlchemy-2.0.24-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95bae3d38f8808d79072da25d5e5a6095f36fe1f9d6c614dd72c59ca8397c7c0"}, - {file = "SQLAlchemy-2.0.24-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a04191a7c8d77e63f6fc1e8336d6c6e93176c0c010833e74410e647f0284f5a1"}, - {file = "SQLAlchemy-2.0.24-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:acc58b7c2e40235712d857fdfc8f2bda9608f4a850d8d9ac0dd1fc80939ca6ac"}, - {file = "SQLAlchemy-2.0.24-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:00d76fe5d7cdb5d84d625ce002ce29fefba0bfd98e212ae66793fed30af73931"}, - {file = "SQLAlchemy-2.0.24-cp37-cp37m-win32.whl", hash = "sha256:29e51f848f843bbd75d74ae64ab1ab06302cb1dccd4549d1f5afe6b4a946edb2"}, - {file = "SQLAlchemy-2.0.24-cp37-cp37m-win_amd64.whl", hash = "sha256:e9d036e343a604db3f5a6c33354018a84a1d3f6dcae3673358b404286204798c"}, - {file = "SQLAlchemy-2.0.24-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9bafaa05b19dc07fa191c1966c5e852af516840b0d7b46b7c3303faf1a349bc9"}, - {file = "SQLAlchemy-2.0.24-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e69290b921b7833c04206f233d6814c60bee1d135b09f5ae5d39229de9b46cd4"}, - {file = "SQLAlchemy-2.0.24-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8398593ccc4440ce6dffcc4f47d9b2d72b9fe7112ac12ea4a44e7d4de364db1"}, - {file = "SQLAlchemy-2.0.24-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f073321a79c81e1a009218a21089f61d87ee5fa3c9563f6be94f8b41ff181812"}, - {file = "SQLAlchemy-2.0.24-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9036ebfd934813990c5b9f71f297e77ed4963720db7d7ceec5a3fdb7cd2ef6ce"}, - {file = 
"SQLAlchemy-2.0.24-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fcf84fe93397a0f67733aa2a38ed4eab9fc6348189fc950e656e1ea198f45668"}, - {file = "SQLAlchemy-2.0.24-cp38-cp38-win32.whl", hash = "sha256:6f5e75de91c754365c098ac08c13fdb267577ce954fa239dd49228b573ca88d7"}, - {file = "SQLAlchemy-2.0.24-cp38-cp38-win_amd64.whl", hash = "sha256:9f29c7f0f4b42337ec5a779e166946a9f86d7d56d827e771b69ecbdf426124ac"}, - {file = "SQLAlchemy-2.0.24-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:07cc423892f2ceda9ae1daa28c0355757f362ecc7505b1ab1a3d5d8dc1c44ac6"}, - {file = "SQLAlchemy-2.0.24-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2a479aa1ab199178ff1956b09ca8a0693e70f9c762875d69292d37049ffd0d8f"}, - {file = "SQLAlchemy-2.0.24-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b8d0e8578e7f853f45f4512b5c920f6a546cd4bed44137460b2a56534644205"}, - {file = "SQLAlchemy-2.0.24-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17e7e27af178d31b436dda6a596703b02a89ba74a15e2980c35ecd9909eea3a"}, - {file = "SQLAlchemy-2.0.24-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1ca7903d5e7db791a355b579c690684fac6304478b68efdc7f2ebdcfe770d8d7"}, - {file = "SQLAlchemy-2.0.24-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db09e424d7bb89b6215a184ca93b4f29d7f00ea261b787918a1af74143b98c06"}, - {file = "SQLAlchemy-2.0.24-cp39-cp39-win32.whl", hash = "sha256:a5cd7d30e47f87b21362beeb3e86f1b5886e7d9b0294b230dde3d3f4a1591375"}, - {file = "SQLAlchemy-2.0.24-cp39-cp39-win_amd64.whl", hash = "sha256:7ae5d44517fe81079ce75cf10f96978284a6db2642c5932a69c82dbae09f009a"}, - {file = "SQLAlchemy-2.0.24-py3-none-any.whl", hash = "sha256:8f358f5cfce04417b6ff738748ca4806fe3d3ae8040fb4e6a0c9a6973ccf9b6e"}, - {file = "SQLAlchemy-2.0.24.tar.gz", hash = "sha256:6db97656fd3fe3f7e5b077f12fa6adb5feb6e0b567a3e99f47ecf5f7ea0a09e3"}, + {file = "SQLAlchemy-2.0.25-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4344d059265cc8b1b1be351bfb88749294b87a8b2bbe21dfbe066c4199541ebd"}, + {file = "SQLAlchemy-2.0.25-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6f9e2e59cbcc6ba1488404aad43de005d05ca56e069477b33ff74e91b6319735"}, + {file = "SQLAlchemy-2.0.25-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84daa0a2055df9ca0f148a64fdde12ac635e30edbca80e87df9b3aaf419e144a"}, + {file = "SQLAlchemy-2.0.25-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc8b7dabe8e67c4832891a5d322cec6d44ef02f432b4588390017f5cec186a84"}, + {file = "SQLAlchemy-2.0.25-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f5693145220517b5f42393e07a6898acdfe820e136c98663b971906120549da5"}, + {file = "SQLAlchemy-2.0.25-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:db854730a25db7c956423bb9fb4bdd1216c839a689bf9cc15fada0a7fb2f4570"}, + {file = "SQLAlchemy-2.0.25-cp310-cp310-win32.whl", hash = "sha256:14a6f68e8fc96e5e8f5647ef6cda6250c780612a573d99e4d881581432ef1669"}, + {file = "SQLAlchemy-2.0.25-cp310-cp310-win_amd64.whl", hash = "sha256:87f6e732bccd7dcf1741c00f1ecf33797383128bd1c90144ac8adc02cbb98643"}, + {file = "SQLAlchemy-2.0.25-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:342d365988ba88ada8af320d43df4e0b13a694dbd75951f537b2d5e4cb5cd002"}, + {file = "SQLAlchemy-2.0.25-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f37c0caf14b9e9b9e8f6dbc81bc56db06acb4363eba5a633167781a48ef036ed"}, + {file = "SQLAlchemy-2.0.25-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:aa9373708763ef46782d10e950b49d0235bfe58facebd76917d3f5cbf5971aed"}, + {file = "SQLAlchemy-2.0.25-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d24f571990c05f6b36a396218f251f3e0dda916e0c687ef6fdca5072743208f5"}, + {file = "SQLAlchemy-2.0.25-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75432b5b14dc2fff43c50435e248b45c7cdadef73388e5610852b95280ffd0e9"}, + {file = "SQLAlchemy-2.0.25-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:884272dcd3ad97f47702965a0e902b540541890f468d24bd1d98bcfe41c3f018"}, + {file = "SQLAlchemy-2.0.25-cp311-cp311-win32.whl", hash = "sha256:e607cdd99cbf9bb80391f54446b86e16eea6ad309361942bf88318bcd452363c"}, + {file = "SQLAlchemy-2.0.25-cp311-cp311-win_amd64.whl", hash = "sha256:7d505815ac340568fd03f719446a589162d55c52f08abd77ba8964fbb7eb5b5f"}, + {file = "SQLAlchemy-2.0.25-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0dacf67aee53b16f365c589ce72e766efaabd2b145f9de7c917777b575e3659d"}, + {file = "SQLAlchemy-2.0.25-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b801154027107461ee992ff4b5c09aa7cc6ec91ddfe50d02bca344918c3265c6"}, + {file = "SQLAlchemy-2.0.25-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59a21853f5daeb50412d459cfb13cb82c089ad4c04ec208cd14dddd99fc23b39"}, + {file = "SQLAlchemy-2.0.25-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:29049e2c299b5ace92cbed0c1610a7a236f3baf4c6b66eb9547c01179f638ec5"}, + {file = "SQLAlchemy-2.0.25-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b64b183d610b424a160b0d4d880995e935208fc043d0302dd29fee32d1ee3f95"}, + {file = "SQLAlchemy-2.0.25-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4f7a7d7fcc675d3d85fbf3b3828ecd5990b8d61bd6de3f1b260080b3beccf215"}, + {file = "SQLAlchemy-2.0.25-cp312-cp312-win32.whl", hash = "sha256:cf18ff7fc9941b8fc23437cc3e68ed4ebeff3599eec6ef5eebf305f3d2e9a7c2"}, + {file = "SQLAlchemy-2.0.25-cp312-cp312-win_amd64.whl", hash = "sha256:91f7d9d1c4dd1f4f6e092874c128c11165eafcf7c963128f79e28f8445de82d5"}, + {file = "SQLAlchemy-2.0.25-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:bb209a73b8307f8fe4fe46f6ad5979649be01607f11af1eb94aa9e8a3aaf77f0"}, + {file = "SQLAlchemy-2.0.25-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:798f717ae7c806d67145f6ae94dc7c342d3222d3b9a311a784f371a4333212c7"}, + {file = "SQLAlchemy-2.0.25-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fdd402169aa00df3142149940b3bf9ce7dde075928c1886d9a1df63d4b8de62"}, + {file = "SQLAlchemy-2.0.25-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0d3cab3076af2e4aa5693f89622bef7fa770c6fec967143e4da7508b3dceb9b9"}, + {file = "SQLAlchemy-2.0.25-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:74b080c897563f81062b74e44f5a72fa44c2b373741a9ade701d5f789a10ba23"}, + {file = "SQLAlchemy-2.0.25-cp37-cp37m-win32.whl", hash = "sha256:87d91043ea0dc65ee583026cb18e1b458d8ec5fc0a93637126b5fc0bc3ea68c4"}, + {file = "SQLAlchemy-2.0.25-cp37-cp37m-win_amd64.whl", hash = "sha256:75f99202324383d613ddd1f7455ac908dca9c2dd729ec8584c9541dd41822a2c"}, + {file = "SQLAlchemy-2.0.25-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:420362338681eec03f53467804541a854617faed7272fe71a1bfdb07336a381e"}, + {file = "SQLAlchemy-2.0.25-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c88f0c7dcc5f99bdb34b4fd9b69b93c89f893f454f40219fe923a3a2fd11625"}, + {file = "SQLAlchemy-2.0.25-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:a3be4987e3ee9d9a380b66393b77a4cd6d742480c951a1c56a23c335caca4ce3"}, + {file = "SQLAlchemy-2.0.25-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a159111a0f58fb034c93eeba211b4141137ec4b0a6e75789ab7a3ef3c7e7e3"}, + {file = "SQLAlchemy-2.0.25-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8b8cb63d3ea63b29074dcd29da4dc6a97ad1349151f2d2949495418fd6e48db9"}, + {file = "SQLAlchemy-2.0.25-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:736ea78cd06de6c21ecba7416499e7236a22374561493b456a1f7ffbe3f6cdb4"}, + {file = "SQLAlchemy-2.0.25-cp38-cp38-win32.whl", hash = "sha256:10331f129982a19df4284ceac6fe87353ca3ca6b4ca77ff7d697209ae0a5915e"}, + {file = "SQLAlchemy-2.0.25-cp38-cp38-win_amd64.whl", hash = "sha256:c55731c116806836a5d678a70c84cb13f2cedba920212ba7dcad53260997666d"}, + {file = "SQLAlchemy-2.0.25-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:605b6b059f4b57b277f75ace81cc5bc6335efcbcc4ccb9066695e515dbdb3900"}, + {file = "SQLAlchemy-2.0.25-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:665f0a3954635b5b777a55111ababf44b4fc12b1f3ba0a435b602b6387ffd7cf"}, + {file = "SQLAlchemy-2.0.25-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ecf6d4cda1f9f6cb0b45803a01ea7f034e2f1aed9475e883410812d9f9e3cfcf"}, + {file = "SQLAlchemy-2.0.25-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c51db269513917394faec5e5c00d6f83829742ba62e2ac4fa5c98d58be91662f"}, + {file = "SQLAlchemy-2.0.25-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:790f533fa5c8901a62b6fef5811d48980adeb2f51f1290ade8b5e7ba990ba3de"}, + {file = "SQLAlchemy-2.0.25-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1b1180cda6df7af84fe72e4530f192231b1f29a7496951db4ff38dac1687202d"}, + {file = "SQLAlchemy-2.0.25-cp39-cp39-win32.whl", hash = "sha256:555651adbb503ac7f4cb35834c5e4ae0819aab2cd24857a123370764dc7d7e24"}, + {file = "SQLAlchemy-2.0.25-cp39-cp39-win_amd64.whl", hash = "sha256:dc55990143cbd853a5d038c05e79284baedf3e299661389654551bd02a6a68d7"}, + {file = "SQLAlchemy-2.0.25-py3-none-any.whl", hash = "sha256:a86b4240e67d4753dc3092d9511886795b3c2852abe599cffe108952f7af7ac3"}, + {file = "SQLAlchemy-2.0.25.tar.gz", hash = "sha256:a2c69a7664fb2d54b8682dd774c3b54f67f84fa123cf84dda2a5f40dcaa04e08"}, ] [package.dependencies] greenlet = {version = "!=0.4.17", optional = true, markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\" or extra == \"asyncio\""} -typing-extensions = ">=4.2.0" +typing-extensions = ">=4.6.0" [package.extras] aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] @@ -4055,4 +4091,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.12" -content-hash = "2b0939b9cce5fa623a932eb89645a6ef3cc7384addd232339faded4a4c35cab7" +content-hash = "1acc3ce5c08e0d3af483964b46a783bab32b37eb09f762b55d46826514210252" diff --git a/pyproject.toml b/pyproject.toml index df3ada338..fbd2d4718 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,6 +37,7 @@ black = "*" portalocker = "*" openai = "*" langchain = "*" +langchain_openai = "*" protobuf = "*" typer = "*" setuptools = "*" diff --git a/setup.py b/setup.py index ab5d4584d..135a26e56 100644 --- a/setup.py +++ b/setup.py @@ -26,6 +26,7 @@ "pytest-xdist", "portalocker", "langchain", + "langchain_openai", "rouge_score==0.1.2", "nltk==3.8.1", "ragas", diff --git 
a/tests/test_faithfulness.py b/tests/test_faithfulness.py
index d3595be69..7b3a5c27c 100644
--- a/tests/test_faithfulness.py
+++ b/tests/test_faithfulness.py
@@ -38,7 +38,7 @@
 """


-# @pytest.mark.skip(reason="openai is expensive")
+@pytest.mark.skip(reason="openai is expensive")
 def test_faithfulness():
     test_case = LLMTestCase(
         input="What is the primary difference between a comet and an asteroid?",

diff --git a/tests/test_ragas.py b/tests/test_ragas.py
index a2e38d306..caa93a868 100644
--- a/tests/test_ragas.py
+++ b/tests/test_ragas.py
@@ -2,17 +2,17 @@
 from deepeval.test_case import LLMTestCase
 from deepeval.metrics import (
     RagasMetric,
-    ContextualPrecisionMetric,
-    ContextualRelevancyMetric,
+    RAGASContextualPrecisionMetric,
+    RAGASContextualRelevancyMetric,
     RAGASFaithfulnessMetric,
-    ContextualRecallMetric,
+    RAGASContextualRecallMetric,
     ConcisenessMetric,
     CorrectnessMetric,
     CoherenceMetric,
     MaliciousnessMetric,
+    RAGASAnswerRelevancyMetric,
 )
-from deepeval.metrics.ragas_metric import RAGASAnswerRelevancyMetric
-from deepeval import assert_test, evaluate
+from deepeval import assert_test

 query = "Who won the FIFA World Cup in 2018 and what was the score?"
 output = "Winners of the FIFA world cup were the French national football team"
@@ -51,28 +51,28 @@ def test_everything():
         retrieval_context=context,
         context=context,
     )
-    # metric1 = ContextualRelevancyMetric(model="gpt-4")
-    # metric2 = RAGASFaithfulnessMetric(model="gpt-4")
-    # metric3 = ContextualRecallMetric(model="gpt-4")
-    # metric4 = ConcisenessMetric(model="gpt-4")
-    # metric5 = CorrectnessMetric(model="gpt-4")
-    # metric6 = CoherenceMetric(model="gpt-4")
-    # metric7 = MaliciousnessMetric(model="gpt-4")
-    # metric8 = RAGASAnswerRelevancyMetric(model="gpt-4")
-    metric9 = ContextualPrecisionMetric()
-    # metric10 = RagasMetric()
+    metric1 = RAGASContextualRelevancyMetric(model="gpt-4")
+    metric2 = RAGASFaithfulnessMetric(model="gpt-4")
+    metric3 = RAGASContextualRecallMetric(model="gpt-4")
+    metric4 = ConcisenessMetric(model="gpt-4")
+    metric5 = CorrectnessMetric(model="gpt-4")
+    metric6 = CoherenceMetric(model="gpt-4")
+    metric7 = MaliciousnessMetric(model="gpt-4")
+    metric8 = RAGASAnswerRelevancyMetric(model="gpt-4")
+    metric9 = RAGASContextualPrecisionMetric()
+    metric10 = RagasMetric()
     assert_test(
         test_case,
         [
-            # metric1,
-            # metric2,
-            # metric3,
-            # metric4,
-            # metric5,
-            # metric6,
-            # metric7,
-            # metric8,
+            metric1,
+            metric2,
+            metric3,
+            metric4,
+            metric5,
+            metric6,
+            metric7,
+            metric8,
             metric9,
-            # metric10,
+            metric10,
         ],
     )

From 43675ae5b50b68fa47d2f24108409dbd2aa4b8ce Mon Sep 17 00:00:00 2001
From: Jeffrey Ip
Date: Thu, 11 Jan 2024 18:36:17 -0800
Subject: [PATCH 06/15] Added threshold

---
 deepeval/test_run/api.py      | 1 +
 deepeval/test_run/test_run.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/deepeval/test_run/api.py b/deepeval/test_run/api.py
index 3a3f37b5b..a80e55cce 100644
--- a/deepeval/test_run/api.py
+++ b/deepeval/test_run/api.py
@@ -6,6 +6,7 @@ class MetricsMetadata(BaseModel):
     metric: str
     score: float
     threshold: float
+    success: bool
     reason: Optional[str] = None

diff --git a/deepeval/test_run/test_run.py b/deepeval/test_run/test_run.py
index 3a1c41ffa..e382895c7 100644
--- a/deepeval/test_run/test_run.py
+++ b/deepeval/test_run/test_run.py
@@ -93,7 +93,7 @@ def add_llm_test_case(
             score=metric.score,
             threshold=metric.threshold,
             reason=metric.reason,
-            # success=metric.is_successful()
+            success=metric.is_successful(),
         )

         if existing_test_case:
From a4d80237bc3970f7a0c7d544a23ce3dab1168c33 Mon Sep 17 00:00:00 2001
From: Jeffrey Ip
Date: Thu, 11 Jan 2024 18:41:12 -0800
Subject: [PATCH 07/15] new release

---
 deepeval/_version.py | 2 +-
 pyproject.toml       | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/deepeval/_version.py b/deepeval/_version.py
index ee489183b..3180b565a 100644
--- a/deepeval/_version.py
+++ b/deepeval/_version.py
@@ -1 +1 @@
-__version__: str = "0.20.44"
+__version__: str = "0.20.45"

diff --git a/pyproject.toml b/pyproject.toml
index fbd2d4718..daad81439 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "deepeval"
-version = "0.20.44"
+version = "0.20.45"
 description = "The Evaluation Framework for LLMs"
 authors = ["Jeffrey Ip "]
 license = "Apache-2.0"

From c6a7a5513536802f99b2007fe8bb2bae2c4bc534 Mon Sep 17 00:00:00 2001
From: Jeffrey Ip
Date: Thu, 11 Jan 2024 18:54:05 -0800
Subject: [PATCH 08/15] Updated docs

---
 docs/docs/metrics-custom.mdx | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/docs/docs/metrics-custom.mdx b/docs/docs/metrics-custom.mdx
index 2cadb8f3e..afe4bb9cb 100644
--- a/docs/docs/metrics-custom.mdx
+++ b/docs/docs/metrics-custom.mdx
@@ -24,14 +24,14 @@
 from deepeval.metrics import BaseMetric
 from deepeval.test_case import LLMTestCase

 # Inherit BaseMetric
-class LengthMetric(BaseMetric):
-    # This metric checks if the output length is greater than 10 characters
-    def __init__(self, max_length: int=10):
-        self.threshold = max_length
+class LatencyMetric(BaseMetric):
+    # This metric by default checks if the latency is greater than 10 seconds
+    def __init__(self, max_seconds: int=10):
+        self.threshold = max_seconds

     def measure(self, test_case: LLMTestCase):
         # Set self.success and self.score in the "measure" method
-        self.success = len(test_case.actual_output) > self.threshold
+        self.success = len(test_case.execution_time) <= self.threshold
         if self.success:
             self.score = 1
         else:
@@ -47,7 +47,7 @@ class LengthMetric(BaseMetric):

     @property
     def name(self):
-        return "Length"
+        return "Latency"
 ```

 Notice that a few things have happened:

From 02559e27da04a360bead18bdddef2d9d4e8d69ba Mon Sep 17 00:00:00 2001
From: Jeffrey Ip
Date: Thu, 11 Jan 2024 19:09:47 -0800
Subject: [PATCH 09/15] Fix display logic

---
 deepeval/test_run/test_run.py       | 10 +++++-----
 tests/test_custom_execution_time.py | 29 ---------------------------
 tests/test_custom_metric.py         | 31 +++++++++++++++++------------
 3 files changed, 23 insertions(+), 47 deletions(-)
 delete mode 100644 tests/test_custom_execution_time.py

diff --git a/deepeval/test_run/test_run.py b/deepeval/test_run/test_run.py
index e382895c7..dd82c9523 100644
--- a/deepeval/test_run/test_run.py
+++ b/deepeval/test_run/test_run.py
@@ -88,7 +88,7 @@ def add_llm_test_case(
             test_case_id, None
         )

-        metrics_metadata = MetricsMetadata(
+        metric_metadata = MetricsMetadata(
             metric=metric.__name__,
             score=metric.score,
             threshold=metric.threshold,
@@ -98,7 +98,7 @@ def add_llm_test_case(
         if existing_test_case:
             # If it exists, append the metrics to the existing test case
-            existing_test_case.metrics_metadata.append(metrics_metadata)
+            existing_test_case.metrics_metadata.append(metric_metadata)
             if metric.is_successful() and existing_test_case.success == True:
                 success = True
             else:
@@ -112,7 +112,7 @@ def add_llm_test_case(
                 actualOutput=test_case.actual_output,
                 expectedOutput=test_case.expected_output,
                 success=metric.is_successful(),
-                metricsMetadata=[metrics_metadata],
+                metricsMetadata=[metric_metadata],
                 runDuration=run_duration,
                 context=test_case.context,
                 retrievalContext=test_case.retrieval_context,
@@ -214,7 +214,7 @@ def display_results_table(self, test_run: TestRun):
                 test_case_name += f" ({test_case.id})"

             for metric_metadata in test_case.metrics_metadata:
-                if metric_metadata.score >= metric_metadata.threshold:
+                if metric_metadata.success:
                     pass_count += 1
                 else:
                     fail_count += 1
@@ -228,7 +228,7 @@
             )

             for metric_metadata in test_case.metrics_metadata:
-                if metric_metadata.score >= metric_metadata.threshold:
+                if metric_metadata.success:
                     status = "[green]PASSED[/green]"
                 else:
                     status = "[red]FAILED[/red]"
diff --git a/tests/test_custom_execution_time.py b/tests/test_custom_execution_time.py
deleted file mode 100644
index 488432cad..000000000
--- a/tests/test_custom_execution_time.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# from deepeval.metrics import BaseMetric
-# from deepeval.test_case import LLMTestCase
-# from deepeval import assert_test
-
-# class ExecutionTimeMetric(BaseMetric):
-#     def __init__(self, max_execution_time: float):
-#         self.max_execution_time = max_execution_time
-
-#     def measure(self, test_case: LLMTestCase):
-#         self.success = test_case.execution_time <= self.max_execution_time
-#         if self.success:
-#             self.score = 1
-#         else:
-#             self.score = 0
-
-#         return self.score
-
-#     def is_successful(self):
-#         return self.success
-
-#     @property
-#     def name(self):
-#         return "Execution Time"
-
-
-# def test_execution_time():
-#     test_case = LLMTestCase(input="...", actual_output="...", execution_time=4.57)
-#     execution_time_metric = ExecutionTimeMetric(max_execution_time=5)
-#     assert_test(test_case, [execution_time_metric])
diff --git a/tests/test_custom_metric.py b/tests/test_custom_metric.py
index 407b378e4..9ec77aa9b 100644
--- a/tests/test_custom_metric.py
+++ b/tests/test_custom_metric.py
@@ -6,32 +6,37 @@
 from deepeval import assert_test


-class LengthMetric(BaseMetric):
-    """This metric checks if the output is more than 3 letters"""
-
-    def __init__(self, threshold: int = 3):
-        self.threshold = threshold
+class LatencyMetric(BaseMetric):
+    # This metric by default checks if the latency is greater than 10 seconds
+    def __init__(self, max_seconds: float = 10):
+        self.threshold = max_seconds

     def measure(self, test_case: LLMTestCase):
-        # sends to server
-        text = test_case.actual_output
-        score = len(text)
-        self.success = score > self.threshold
-        # Optional: Logs it to the server
-        return score
+        # Set self.success and self.score in the "measure" method
+        self.success = test_case.execution_time <= self.threshold
+        if self.success:
+            self.score = 1
+        else:
+            self.score = 0
+
+        # You can also set a reason for the score returned.
+        # This is particularly useful for a score computed using LLMs
+        self.reason = None
+        return self.score

     def is_successful(self):
         return self.success

     @property
     def __name__(self):
-        return "Length"
+        return "Latency"


 def test_length_metric():
-    metric = LengthMetric()
+    metric = LatencyMetric()
     test_case = LLMTestCase(
         input="placeholder",
         actual_output="This is a long sentence that is more than 3 letters",
+        execution_time=8.3,
     )
     assert_test(test_case, [metric])

From 877842ffc49e3a69dfbe98bf0022f2e93e06f0cf Mon Sep 17 00:00:00 2001
From: Jeffrey Ip
Date: Thu, 11 Jan 2024 19:12:08 -0800
Subject: [PATCH 10/15] .
---
 docs/docs/metrics-custom.mdx | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/docs/metrics-custom.mdx b/docs/docs/metrics-custom.mdx
index afe4bb9cb..229913ed0 100644
--- a/docs/docs/metrics-custom.mdx
+++ b/docs/docs/metrics-custom.mdx
@@ -39,14 +39,14 @@ class LatencyMetric(BaseMetric):

         # You can also set a reason for the score returned.
         # This is particularly useful for a score computed using LLMs
-        self.reason = "..."
+        # self.reason = "Too slow!"
         return self.score

     def is_successful(self):
         return self.success

     @property
-    def name(self):
+    def __name__(self):
         return "Latency"
 ```

From b1115a1c54ff946fd04aff472c7be21f14684ac5 Mon Sep 17 00:00:00 2001
From: Jeffrey Ip
Date: Thu, 11 Jan 2024 19:17:54 -0800
Subject: [PATCH 11/15] .

---
 docs/docs/metrics-custom.mdx | 15 +++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/docs/docs/metrics-custom.mdx b/docs/docs/metrics-custom.mdx
index 229913ed0..8609954fa 100644
--- a/docs/docs/metrics-custom.mdx
+++ b/docs/docs/metrics-custom.mdx
@@ -58,6 +58,17 @@ Notice that a few things have happened:

 - `self.is_successful()` simply returns the success status
 - `name()` simply returns a string representing the metric name

-To create a custom metric without unexpected errors, we recommend you set the appropriate class variables in the appropriate methods as outlined above.
+To create a custom metric without unexpected errors, we recommend you set the appropriate class variables in the appropriate methods as outlined above. You should also note that `self.reason` is **optional**. `self.reason` should be a string representing the rationale behind an LLM computed score. This is only applicable if you're using LLMs as an evaluator in the `measure()` method, and have implemented a way to generate a score reasoning.

-You should also note that `self.reason` is **optional**. `self.reason` should be a string representing the rationale behind an LLM computed score. This is only applicable if you're using LLMs as an evaluator in the `measure()` method, and has implemented a way to generate a score reasoning.
+After creating a custom metric, you can use it in the same way as all of `deepeval`'s metrics:
+
+```python
+from deepeval import evaluate
+from deepeval.test_case import LLMTestCase
+...
+
+# Note that we pass in execution time since the measure method requires it for evaluation
+latency_metric = LatencyMetric(max_seconds=10.0)
+test_case = LLMTestCase(input="...", actual_output="...", execution_time=8.3)
+evaluate([test_case], [latency_metric])
+```

From dcdc07e16f0fa066d170069b66641d5568df92b7 Mon Sep 17 00:00:00 2001
From: Jeffrey Ip
Date: Thu, 11 Jan 2024 19:49:48 -0800
Subject: [PATCH 12/15] .

---
 docs/docs/evaluation-test-cases.mdx | 49 ++++++++++++++++++++++++++---
 docs/docs/metrics-custom.mdx        | 14 +++++----
 2 files changed, 53 insertions(+), 10 deletions(-)

diff --git a/docs/docs/evaluation-test-cases.mdx b/docs/docs/evaluation-test-cases.mdx
index 270f3be2d..4561404d3 100644
--- a/docs/docs/evaluation-test-cases.mdx
+++ b/docs/docs/evaluation-test-cases.mdx
@@ -13,16 +13,19 @@ A test case is a blueprint provided by `deepeval` to unit test LLM outputs based

 - [Optional] `expected_output`
 - [Optional] `context`
 - [Optional] `retrieval_context`
+- [Optional] `execution_time` (`float`)
+- [Optional] `cost` (`float`)

 Except for `actual_output`, all parameters should originate from your evaluation dataset (if you have one).
 Here's an example implementation of a test case:

 ```python
 test_case = LLMTestCase(
     input="What if these shoes don't fit?",
-    expected_output = "You're eligible for a 30 day refund at no extra cost.",
-    actual_output = "We offer a 30-day full refund at no extra cost.",
-    context = ["All customers are eligible for a 30 day full refund at no extra cost."]
-    retrieval_context = ["Only shoes can be refunded."]
+    expected_output="You're eligible for a 30 day refund at no extra cost.",
+    actual_output="We offer a 30-day full refund at no extra cost.",
+    context=["All customers are eligible for a 30 day full refund at no extra cost."],
+    retrieval_context=["Only shoes can be refunded."],
+    execution_time=10.0
 )
 ```
@@ -174,6 +177,44 @@ test_case = LLMTestCase(
 Remember, `context` is the ideal retrieval results for a given input and typically come from your evaluation dataset, whereas `retrieval_context` is your LLM application's actual retrieval results.
 :::

+## Execution Time
+
+The `execution_time` is an **optional** parameter that represents how long it took your LLM application to finish executing. However, if you're trying to measure something else, like the execution time for only the retrieval part of your RAG pipeline, feel free to supply that number instead of the overall execution time.
+
+```python
+...
+
+test_case = LLMTestCase(
+    input="...",
+    actual_output="...",
+    # Replace this with the actual execution time of your LLM application
+    execution_time=10.4
+)
+```
+
+:::note
+`deepeval` does not offer metrics that evaluate on latency and cost, so feel free to supply the `execution_time` in either seconds, milliseconds, or even nanoseconds. That being said, [here is a full working example](metrics-custom#implementation) of how you can build your own `LatencyMetric` using the `execution_time` parameter.
+:::
+
+## Cost
+
+The `cost` is an **optional** parameter that represents the token cost for a given execution of your LLM application. However, similar to `execution_time`, the `cost` parameter does not strictly have to be the total completion cost (e.g., it could be the embedding cost), nor does it have to be in any set currency.
+
+```python
+...
+
+test_case = LLMTestCase(
+    input="...",
+    actual_output="...",
+    # Replace this with the actual cost of your LLM application
+    cost=0.78
+)
+```
+
+:::info
+`deepeval` does not offer cost and latency metrics because it is difficult to account for all different units and currencies available. We highly encourage you to look at the [custom metrics section](metrics-custom#implementation) for a full example on how to create your own metric if you are looking to evaluate cost and latency.
+:::
+
 ## Run A Test Case

 `deepeval` offers an option to quickly run a test case without going through the CLI.
diff --git a/docs/docs/metrics-custom.mdx b/docs/docs/metrics-custom.mdx
index 8609954fa..1621deed6 100644
--- a/docs/docs/metrics-custom.mdx
+++ b/docs/docs/metrics-custom.mdx
@@ -31,15 +31,15 @@ class LatencyMetric(BaseMetric):

     def measure(self, test_case: LLMTestCase):
         # Set self.success and self.score in the "measure" method
-        self.success = len(test_case.execution_time) <= self.threshold
+        self.success = test_case.execution_time <= self.threshold
         if self.success:
             self.score = 1
         else:
             self.score = 0

-        # You can also set a reason for the score returned.
+        # You can also optionally set a reason for the score returned.
         # This is particularly useful for a score computed using LLMs
-        # self.reason = "Too slow!"
+        self.reason = "Too slow!"
         return self.score

     def is_successful(self):
@@ -52,11 +52,11 @@ class LatencyMetric(BaseMetric):

 Notice that a few things have happened:

-- `self.threshold` was set in `__init__()`
+- `self.threshold` was set in `__init__()`, and this can be either a minimum or maximum threshold
 - `self.success`, `self.score`, and `self.reason` were set in `measure()`
 - `measure()` takes in an `LLMTestCase`
-- `self.is_successful()` simply returns the success status
-- `name()` simply returns a string representing the metric name
+- `is_successful()` simply returns the success status
+- `__name__()` simply returns a string representing the metric name

 To create a custom metric without unexpected errors, we recommend you set the appropriate class variables in the appropriate methods as outlined above. You should also note that `self.reason` is **optional**. `self.reason` should be a string representing the rationale behind an LLM computed score. This is only applicable if you're using LLMs as an evaluator in the `measure()` method, and have implemented a way to generate a score reasoning.
@@ -65,8 +65,10 @@ After creating a custom metric, you can use it in the same way as all of `deepeval`'s metrics:

 ```python
 from deepeval import evaluate
 from deepeval.test_case import LLMTestCase
+
 ...
+
 # Note that we pass in execution time since the measure method requires it for evaluation
 latency_metric = LatencyMetric(max_seconds=10.0)

From 6e63b989c6efb418097f5b9c162e84bd3690b79e Mon Sep 17 00:00:00 2001
From: Jeffrey Ip
Date: Thu, 11 Jan 2024 19:54:14 -0800
Subject: [PATCH 13/15] Updated docs

---
 docs/docs/evaluation-test-cases.mdx | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/docs/docs/evaluation-test-cases.mdx b/docs/docs/evaluation-test-cases.mdx
index 4561404d3..753690541 100644
--- a/docs/docs/evaluation-test-cases.mdx
+++ b/docs/docs/evaluation-test-cases.mdx
@@ -158,8 +158,6 @@ prompt_template = """

 prompt = prompt_template.format(text="Who's a good boy?")

-context = ["Rocky is a good boy."]
-
 # Replace this with the actual retrieved context from your RAG pipeline
 retrieval_context = ["Rocky is a good cat."]
@@ -168,7 +166,6 @@ test_case = LLMTestCase(
     # Replace this with your actual LLM application
     actual_output=chatbot.run(prompt),
     expected_output="Me, ruff!",
-    context=context,
     retrieval_context=retrieval_context
 )
 ```
@@ -182,8 +179,6 @@ Remember, `context` is the ideal retrieval results for a given input and typically come from your evaluation dataset, whereas `retrieval_context` is your LLM application's actual retrieval results.

 The `execution_time` is an **optional** parameter that represents how long it took your LLM application to finish executing. However, if you're trying to measure something else, like the execution time for only the retrieval part of your RAG pipeline, feel free to supply that number instead of the overall execution time.

 ```python
-...
-
 test_case = LLMTestCase(
     input="...",
     actual_output="...",
     # Replace this with the actual execution time of your LLM application
     execution_time=10.4
 )
 ```
@@ -201,8 +196,6 @@ test_case = LLMTestCase(

 The `cost` is an **optional** parameter that represents the token cost for a given execution of your LLM application. However, similar to `execution_time`, the `cost` parameter does not strictly have to be the total completion cost (e.g., it could be the embedding cost), nor does it have to be in any set currency.

 ```python
-...
-
 test_case = LLMTestCase(
     input="...",
     actual_output="...",
     # Replace this with the actual cost of your LLM application
     cost=0.78
 )
 ```

From 7b8e1b4143621e5feef90d8136a9c1f67ae8508d Mon Sep 17 00:00:00 2001
From: Jeffrey Ip
Date: Thu, 11 Jan 2024 19:59:52 -0800
Subject: [PATCH 14/15] new release

---
 deepeval/_version.py | 2 +-
 pyproject.toml       | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/deepeval/_version.py b/deepeval/_version.py
index 3180b565a..2c7d1d32f 100644
--- a/deepeval/_version.py
+++ b/deepeval/_version.py
@@ -1 +1 @@
-__version__: str = "0.20.45"
+__version__: str = "0.20.46"

diff --git a/pyproject.toml b/pyproject.toml
index daad81439..0dc7e540d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "deepeval"
-version = "0.20.45"
+version = "0.20.46"
 description = "The Evaluation Framework for LLMs"
 authors = ["Jeffrey Ip "]
 license = "Apache-2.0"

From 6b4e6e9ea7cfb192f3cae9f97ac5528e128560a7 Mon Sep 17 00:00:00 2001
From: Jeffrey Ip <143328635+penguine-ip@users.noreply.github.com>
Date: Fri, 12 Jan 2024 09:55:01 -0800
Subject: [PATCH 15/15] Update README.md

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 1e4afff12..62fcfc68a 100644
--- a/README.md
+++ b/README.md
@@ -120,7 +120,7 @@ deepeval test run test_chatbot.py
 Alternatively, you can evaluate without Pytest, which is more suited for a notebook environment.

 ```python
-from deepeval import evalate
+from deepeval import evaluate
 from deepeval.metrics import HallucinationMetric
 from deepeval.test_case import LLMTestCase
@@ -135,7 +135,7 @@ test_case = LLMTestCase(
     actual_output=actual_output,
     context=context
 )
-evalate([test_case], [hallucination_metric])
+evaluate([test_case], [hallucination_metric])
 ```

 ## Evaluating a Dataset / Test Cases in Bulk