diff --git a/gptcache/adapter/openai.py b/gptcache/adapter/openai.py
index 2c1c01c1..8fa5dd64 100644
--- a/gptcache/adapter/openai.py
+++ b/gptcache/adapter/openai.py
@@ -39,7 +39,7 @@ def llm_handler(cls, *llm_args, **llm_kwargs):
 
     @staticmethod
     def update_cache_callback(llm_data, update_cache_func):
         if not isinstance(llm_data, Iterator):
-            update_cache_func(Answer(get_message_from_openai_answer(llm_data)), AnswerType.STR)
+            update_cache_func(Answer(get_message_from_openai_answer(llm_data), AnswerType.STR))
             return llm_data
         else:
diff --git a/gptcache/utils/response.py b/gptcache/utils/response.py
index 8f587bb9..da0878f5 100644
--- a/gptcache/utils/response.py
+++ b/gptcache/utils/response.py
@@ -1,6 +1,7 @@
 import base64
 
 import requests
 
+
 def get_message_from_openai_answer(openai_resp):
     return openai_resp["choices"][0]["message"]["content"]
diff --git a/setup.py b/setup.py
index 7095843c..159258ee 100644
--- a/setup.py
+++ b/setup.py
@@ -17,7 +17,7 @@ def parse_requirements(file_name: str) -> List[str]:
 setuptools.setup(
     name="gptcache",
     packages=find_packages(),
-    version="0.1.13",
+    version="0.1.14",
     author="SimFG",
     author_email="bang.fu@zilliz.com",
     description="GPTCache, a powerful caching library that can be used to speed up and lower the cost of chat "
diff --git a/tests/unit_tests/adapter/test_openai.py b/tests/unit_tests/adapter/test_openai.py
index 7cb729bd..156d64c2 100644
--- a/tests/unit_tests/adapter/test_openai.py
+++ b/tests/unit_tests/adapter/test_openai.py
@@ -6,7 +6,7 @@
     get_image_from_openai_b64,
     get_image_from_path,
     get_image_from_openai_url,
-    get_audio_text_from_openai_answer
+    get_audio_text_from_openai_answer,
 )
 from gptcache.adapter import openai
 from gptcache import cache
@@ -17,14 +17,56 @@
 import base64
 from urllib.request import urlopen
 from io import BytesIO
+
 try:
     from PIL import Image
 except ModuleNotFoundError:
     from gptcache.utils.dependency_control import prompt_install
+
     prompt_install("pillow")
     from PIL import Image
 
 
+def test_normal_openai():
+    cache.init()
+    question = "calculate 1+3"
+    expect_answer = "the result is 4"
+    with patch("openai.ChatCompletion.create") as mock_create:
+        datas = {
+            "choices": [
+                {
+                    "message": {"content": expect_answer, "role": "assistant"},
+                    "finish_reason": "stop",
+                    "index": 0,
+                }
+            ],
+            "created": 1677825464,
+            "id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
+            "model": "gpt-3.5-turbo-0301",
+            "object": "chat.completion.chunk",
+        }
+        mock_create.return_value = datas
+
+        response = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": question},
+            ],
+        )
+
+        assert get_message_from_openai_answer(response) == expect_answer, response
+
+    response = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": question},
+        ],
+    )
+    answer_text = get_message_from_openai_answer(response)
+    assert answer_text == expect_answer, answer_text
+
+
 def test_stream_openai():
     cache.init()
@@ -106,42 +148,29 @@ def test_completion():
 
     with patch("openai.Completion.create") as mock_create:
         mock_create.return_value = {
-            "choices": [
-                {"text": expect_answer,
-                 "finish_reason": None,
-                 "index": 0}
-            ],
+            "choices": [{"text": expect_answer, "finish_reason": None, "index": 0}],
             "created": 1677825464,
             "id": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
             "model": "text-davinci-003",
             "object": "text_completion",
         }
 
-        response = openai.Completion.create(
-            model="text-davinci-003",
-            prompt=question
-        )
+        response = openai.Completion.create(model="text-davinci-003", prompt=question)
         answer_text = get_text_from_openai_answer(response)
         assert answer_text == expect_answer
 
-    response = openai.Completion.create(
-        model="text-davinci-003",
-        prompt=question
-    )
+    response = openai.Completion.create(model="text-davinci-003", prompt=question)
     answer_text = get_text_from_openai_answer(response)
     assert answer_text == expect_answer
 
 
 def test_image_create():
     cache.init(pre_embedding_func=get_prompt)
-    prompt1 = "test url"# bytes
-    test_url = "https://raw.githubusercontent.com/zilliztech/GPTCache/dev/docs/GPTCache.png"
-    test_response = {
-        "created": 1677825464,
-        "data": [
-            {"url": test_url}
-        ]
-    }
+    prompt1 = "test url"  # bytes
+    test_url = (
+        "https://raw.githubusercontent.com/zilliztech/GPTCache/dev/docs/GPTCache.png"
+    )
+    test_response = {"created": 1677825464, "data": [{"url": test_url}]}
     prompt2 = "test base64"
     img_bytes = base64.b64decode(get_image_from_openai_url(test_response))
     img_file = BytesIO(img_bytes)  # convert image to file-like object
@@ -155,24 +184,18 @@
     with patch("openai.Image.create") as mock_create_b64:
         mock_create_b64.return_value = {
             "created": 1677825464,
-            "data": [
-                {'b64_json': expected_img_data}
-            ]
-        }
+            "data": [{"b64_json": expected_img_data}],
+        }
 
         response = openai.Image.create(
-            prompt=prompt1,
-            size="256x256",
-            response_format="b64_json"
+            prompt=prompt1, size="256x256", response_format="b64_json"
         )
         img_returned = get_image_from_openai_b64(response)
         assert img_returned == expected_img_data
 
     response = openai.Image.create(
-        prompt=prompt1,
-        size="256x256",
-        response_format="b64_json"
-    )
+        prompt=prompt1, size="256x256", response_format="b64_json"
+    )
     img_returned = get_image_from_openai_b64(response)
     assert img_returned == expected_img_data
 
@@ -180,24 +203,18 @@
     with patch("openai.Image.create") as mock_create_url:
         mock_create_url.return_value = {
             "created": 1677825464,
-            "data": [
-                {'url': test_url}
-            ]
-        }
+            "data": [{"url": test_url}],
+        }
 
         response = openai.Image.create(
-            prompt=prompt2,
-            size="256x256",
-            response_format="url"
+            prompt=prompt2, size="256x256", response_format="url"
         )
         answer_url = response["data"][0]["url"]
         assert test_url == answer_url
-
+
     response = openai.Image.create(
-        prompt=prompt2,
-        size="256x256",
-        response_format="url"
-    )
+        prompt=prompt2, size="256x256", response_format="url"
+    )
     img_returned = get_image_from_path(response)
     assert img_returned == expected_img_data
     os.remove(response["data"][0]["url"])
@@ -207,52 +224,42 @@ def test_audio_transcribe():
     cache.init(pre_embedding_func=get_file_bytes)
     url = "https://github.com/towhee-io/examples/releases/download/data/blues.00000.mp3"
     audio_file = urlopen(url)
-    expect_answer = "One bourbon, one scotch and one bill Hey Mr. Bartender, come here I want another drink and I want it now My baby she gone, " \
-                    "she been gone tonight I ain't seen my baby since night of her life One bourbon, one scotch and one bill"
+    expect_answer = (
+        "One bourbon, one scotch and one bill Hey Mr. Bartender, come here I want another drink and I want it now My baby she gone, "
+        "she been gone tonight I ain't seen my baby since night of her life One bourbon, one scotch and one bill"
+    )
 
     with patch("openai.Audio.transcribe") as mock_create:
-        mock_create.return_value = {
-            "text": expect_answer
-        }
+        mock_create.return_value = {"text": expect_answer}
 
-        response = openai.Audio.transcribe(
-            model="whisper-1",
-            file=audio_file
-        )
+        response = openai.Audio.transcribe(model="whisper-1", file=audio_file)
         answer_text = get_audio_text_from_openai_answer(response)
         assert answer_text == expect_answer
 
-    response = openai.Audio.transcribe(
-        model="whisper-1",
-        file=audio_file
-    )
+    response = openai.Audio.transcribe(model="whisper-1", file=audio_file)
     answer_text = get_audio_text_from_openai_answer(response)
     assert answer_text == expect_answer
 
 
 def test_audio_translate():
-    cache.init(pre_embedding_func=get_file_bytes,
-               data_manager=get_data_manager(data_path="data_map1.txt"))
+    cache.init(
+        pre_embedding_func=get_file_bytes,
+        data_manager=get_data_manager(data_path="data_map1.txt"),
+    )
     url = "https://github.com/towhee-io/examples/releases/download/data/blues.00000.mp3"
     audio_file = urlopen(url)
-    expect_answer = "One bourbon, one scotch and one bill Hey Mr. Bartender, come here I want another drink and I want it now My baby she gone, " \
-                    "she been gone tonight I ain't seen my baby since night of her life One bourbon, one scotch and one bill"
+    expect_answer = (
+        "One bourbon, one scotch and one bill Hey Mr. Bartender, come here I want another drink and I want it now My baby she gone, "
+        "she been gone tonight I ain't seen my baby since night of her life One bourbon, one scotch and one bill"
+    )
 
     with patch("openai.Audio.translate") as mock_create:
-        mock_create.return_value = {
-            "text": expect_answer
-        }
+        mock_create.return_value = {"text": expect_answer}
 
-        response = openai.Audio.translate(
-            model="whisper-1",
-            file=audio_file
-        )
+        response = openai.Audio.translate(model="whisper-1", file=audio_file)
         answer_text = get_audio_text_from_openai_answer(response)
         assert answer_text == expect_answer
 
-    response = openai.Audio.translate(
-        model="whisper-1",
-        file=audio_file
-    )
+    response = openai.Audio.translate(model="whisper-1", file=audio_file)
    answer_text = get_audio_text_from_openai_answer(response)
    assert answer_text == expect_answer