From 2dbfe0c9a1f3d034c3ad72c7dc1683c78411118f Mon Sep 17 00:00:00 2001
From: Rida KEJJI
Date: Fri, 31 Jan 2025 16:02:36 +0100
Subject: [PATCH] test: added multi-round tests for azure_openai_model

---
 test/models/test_azure_openai_model.py | 65 +++++++++++++++++++++++++-
 1 file changed, 64 insertions(+), 1 deletion(-)

diff --git a/test/models/test_azure_openai_model.py b/test/models/test_azure_openai_model.py
index 7e366f2463..ba508a995f 100644
--- a/test/models/test_azure_openai_model.py
+++ b/test/models/test_azure_openai_model.py
@@ -22,14 +22,43 @@
 """
 
 import re
+from unittest.mock import MagicMock, patch
 
 import pytest
+from openai.types.chat.chat_completion import Choice
+from openai.types.chat.chat_completion_message import ChatCompletionMessage
+from openai.types.completion_usage import CompletionUsage
 
 from camel.configs import ChatGPTConfig
 from camel.models import AzureOpenAIModel, ModelFactory
-from camel.types import ModelPlatformType, ModelType
+from camel.types import ChatCompletion, ModelPlatformType, ModelType
 from camel.utils import OpenAITokenCounter
 
+model_backend_rsp = ChatCompletion(
+    id="mock_response_id",
+    choices=[
+        Choice(
+            finish_reason="stop",
+            index=0,
+            logprobs=None,
+            message=ChatCompletionMessage(
+                content="This is a mock response content.",
+                role="assistant",
+                function_call=None,
+                tool_calls=None,
+            ),
+        )
+    ],
+    created=123456789,
+    model="gpt-4o-2024-05-13",
+    object="chat.completion",
+    usage=CompletionUsage(
+        completion_tokens=32,
+        prompt_tokens=15,
+        total_tokens=47,
+    ),
+)
+
 
 @pytest.mark.model_backend
 @pytest.mark.parametrize(
@@ -72,6 +101,40 @@ def test_openai_model_create(model_type: ModelType):
     assert model.model_type == model_type
 
 
+@pytest.mark.model_backend
+@pytest.mark.parametrize(
+    "model_type",
+    [
+        ModelType.GPT_3_5_TURBO,
+        ModelType.GPT_4,
+        ModelType.GPT_4_TURBO,
+        ModelType.GPT_4O,
+        ModelType.GPT_4O_MINI,
+    ],
+)
+@patch("camel.models.azure_openai_model.AzureOpenAI")
+def test_openai_model_run(
+    mock_azure_openai, model_type: ModelType, max_steps=5
+):
+    # Mock the client creation function AzureOpenAI
+    mock_client = MagicMock()
+    mock_azure_openai.return_value = mock_client
+    mock_client.chat.completions.create.return_value = model_backend_rsp
+
+    model = ModelFactory.create(
+        model_platform=ModelPlatformType.AZURE,
+        model_type=model_type,
+        model_config_dict=ChatGPTConfig(temperature=0.8, n=3).as_dict(),
+    )
+    for _ in range(max_steps):
+        model_inference = model.run(messages=[])
+        if isinstance(model_inference, ChatCompletion):
+            assert (
+                model_inference.choices[0].message.content
+                == model_backend_rsp.choices[0].message.content
+            )
+
+
 @pytest.mark.model_backend
 def test_openai_model_unexpected_argument():
     model_type = ModelType.GPT_4