From 139a07da7e7513951aec4f0d29210d4b8add8a70 Mon Sep 17 00:00:00 2001 From: Emery Berger Date: Mon, 5 Feb 2024 12:49:23 -0500 Subject: [PATCH 1/8] Moving to litellm; added Bedrock. --- pyproject.toml | 2 +- src/cwhy/cwhy.py | 88 ++++++++++++++++++++++++++++++++++++------------ 2 files changed, 68 insertions(+), 22 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e314ed5..724f71a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,7 @@ authors = [ { name="Nicolas van Kempen", email="nvankemp@gmail.com" }, { name="Bryce Adelstein Lelbach", email="brycelelbach@gmail.com" } ] -dependencies = ["llm_utils==0.2.4", "openai>=1.3.6", "PyYAML>=6.0.1", "rich>=13.7.0"] +dependencies = ["llm_utils==0.2.4", "openai>=1.3.6", "PyYAML>=6.0.1", "rich>=13.7.0", "litellm>=1.22.3", "tiktoken>=0.5.2"] description = "Explains and proposes fixes for compile-time errors for many programming languages." readme = "README.md" requires-python = ">=3.7" diff --git a/src/cwhy/cwhy.py b/src/cwhy/cwhy.py index 6fd9965..03e4742 100755 --- a/src/cwhy/cwhy.py +++ b/src/cwhy/cwhy.py @@ -1,30 +1,74 @@ import argparse +import contextlib +import os import subprocess import sys -import openai - +import litellm import llm_utils -from . import conversation, prompts +litellm.set_verbose=False + +# Turn off most logging +from llm_utils import logging +logging.getLogger().setLevel(logging.ERROR) +from openai import NotFoundError, RateLimitError, APITimeoutError, OpenAIError, BadRequestError +from . 
import conversation, prompts + +def print_key_info(): + print("You need a key (or keys) from an AI service to use CWhy.") + print() + print("OpenAI:") + print(" You can get a key here: https://platform.openai.com/api-keys") + print(" Set the environment variable OPENAI_API_KEY to your key value.") + print() + print("Bedrock:") + print(" To use Bedrock, you need an AWS account.") + print(" Set the following environment variables:") + print(" export AWS_ACCESS_KEY_ID=") + print(" export AWS_SECRET_ACCESS_KEY=") + print(" export AWS_REGION_NAME=us-west-2") + +# If keys are defined in the environment, we use the appropriate service. +AI_service = None +_DEFAULT_FALLBACK_MODELS = [] + +with contextlib.suppress(KeyError): + if os.environ["OPENAI_API_KEY"]: + AI_service = "OpenAI" + _DEFAULT_FALLBACK_MODELS = ["openai/gpt-4", "openai/gpt-3.5-turbo"] +with contextlib.suppress(KeyError): + if not _DEFAULT_FALLBACK_MODELS: + if os.environ["AWS_ACCESS_KEY_ID"] and os.environ["AWS_SECRET_ACCESS_KEY"] and os.environ["AWS_REGION_NAME"]: + AI_service = "Bedrock" + _DEFAULT_FALLBACK_MODELS = ["bedrock/anthropic.claude-v2:1"] + +# If no AI service was available given the environment variables, print key info and exit. 
+if not AI_service: + print_key_info() + sys.exit(1) + def complete(client, args, user_prompt, **kwargs): try: - completion = client.chat.completions.create( + completion = litellm.completion( model=args.llm, messages=[{"role": "user", "content": user_prompt}], **kwargs, ) return completion - except openai.NotFoundError as e: + except NotFoundError as e: print(f"'{args.llm}' either does not exist or you do not have access to it.") raise e - except openai.RateLimitError as e: + except BadRequestError as e: + print("Something is wrong with your prompt.") + raise e + except RateLimitError as e: print("You have exceeded a rate limit or have no remaining funds.") raise e - except openai.APITimeoutError as e: - print("The OpenAI API timed out.") + except APITimeoutError as e: + print("The API timed out.") print("You can increase the timeout with the --timeout option.") raise e @@ -77,9 +121,6 @@ def evaluate_diff(client, args, stdin): return completion -_DEFAULT_FALLBACK_MODELS = ["gpt-4", "gpt-3.5-turbo"] - - def evaluate_with_fallback(client, args, stdin): for i, model in enumerate(_DEFAULT_FALLBACK_MODELS): if i != 0: @@ -87,7 +128,7 @@ def evaluate_with_fallback(client, args, stdin): args.llm = model try: return evaluate(client, args, stdin) - except openai.NotFoundError: + except NotFoundError: continue @@ -136,28 +177,33 @@ def main(args: argparse.Namespace) -> None: print("CWhy") print("==================================================") try: - client = openai.OpenAI(timeout=args.timeout) - except openai.OpenAIError: - print("You need an OpenAI key to use this tool.") - print("You can get a key here: https://platform.openai.com/api-keys") - print("Set the environment variable OPENAI_API_KEY to your key value.") + result = evaluate(None, args, process.stderr if process.stderr else process.stdout) + print(result) + except OpenAIError: + print_key_info() sys.exit(1) - print(evaluate(client, args, process.stderr if process.stderr else process.stdout)) 
print("==================================================") sys.exit(process.returncode) def evaluate_text_prompt(client, args, prompt, wrap=True, **kwargs): + completion = complete(client, args, prompt, **kwargs) + + msg = f"Analysis from {AI_service}:" + print(msg) + print("-" * len(msg)) text = completion.choices[0].message.content if wrap: text = llm_utils.word_wrap_except_code_blocks(text) - cost = llm_utils.calculate_cost( - completion.usage.prompt_tokens, completion.usage.completion_tokens, args.llm - ) + cost = litellm.completion_cost(completion_response=completion) + # llm_utils.calculate_cost( + # completion.usage.prompt_tokens, completion.usage.completion_tokens, args.llm + # ) + text += "\n\n" text += f"(Total cost: approximately ${cost:.2f} USD.)" From 803aa0593c930927ca31f8506cbabd9c1d2f9a7b Mon Sep 17 00:00:00 2001 From: Emery Berger Date: Mon, 5 Feb 2024 13:20:26 -0500 Subject: [PATCH 2/8] New version, with temporary workaround for llm-utils. --- pyproject.toml | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 724f71a..489d322 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,13 +4,20 @@ build-backend = "hatchling.build" [project] name = "cwhy" -version = "0.4.2" +version = "0.4.3" authors = [ { name="Emery Berger", email="emery.berger@gmail.com" }, { name="Nicolas van Kempen", email="nvankemp@gmail.com" }, { name="Bryce Adelstein Lelbach", email="brycelelbach@gmail.com" } ] -dependencies = ["llm_utils==0.2.4", "openai>=1.3.6", "PyYAML>=6.0.1", "rich>=13.7.0", "litellm>=1.22.3", "tiktoken>=0.5.2"] + +# Work-around until llm_utils can get updated with a new release. +[tool.hatch.metadata] +allow-direct-references = true + +# Replace direct github dependency with "llm_utils>=0.2.4" when updated (see above). 
+ +dependencies = [ "llm_utils @ git+https://github.com/plasma-umass/llm-utils", "openai>=1.3.6", "PyYAML>=6.0.1", "rich>=13.7.0", "litellm>=1.22.3", "tiktoken>=0.5.2"] description = "Explains and proposes fixes for compile-time errors for many programming languages." readme = "README.md" requires-python = ">=3.7" From 2b2711289918fe5ce4cfa4068e1dda192f2e6082 Mon Sep 17 00:00:00 2001 From: Emery Berger Date: Mon, 5 Feb 2024 13:26:02 -0500 Subject: [PATCH 3/8] Changed order. --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 489d322..ed7f7ed 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,7 +17,7 @@ allow-direct-references = true # Replace direct github dependency with "llm_utils>=0.2.4" when updated (see above). -dependencies = [ "llm_utils @ git+https://github.com/plasma-umass/llm-utils", "openai>=1.3.6", "PyYAML>=6.0.1", "rich>=13.7.0", "litellm>=1.22.3", "tiktoken>=0.5.2"] +dependencies = [ "openai>=1.3.6", "PyYAML>=6.0.1", "rich>=13.7.0", "litellm>=1.22.3", "tiktoken>=0.5.2", "llm_utils @ git+https://github.com/plasma-umass/llm-utils"] description = "Explains and proposes fixes for compile-time errors for many programming languages." readme = "README.md" requires-python = ">=3.7" From ad41b7d0c7b9c2974c4a514e7945046971d8b92c Mon Sep 17 00:00:00 2001 From: Emery Berger Date: Mon, 5 Feb 2024 13:31:02 -0500 Subject: [PATCH 4/8] Updated. --- pyproject.toml | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index ed7f7ed..9c75d94 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,13 +11,7 @@ authors = [ { name="Bryce Adelstein Lelbach", email="brycelelbach@gmail.com" } ] -# Work-around until llm_utils can get updated with a new release. -[tool.hatch.metadata] -allow-direct-references = true - -# Replace direct github dependency with "llm_utils>=0.2.4" when updated (see above). 
- -dependencies = [ "openai>=1.3.6", "PyYAML>=6.0.1", "rich>=13.7.0", "litellm>=1.22.3", "tiktoken>=0.5.2", "llm_utils @ git+https://github.com/plasma-umass/llm-utils"] +dependencies = [ "openai>=1.3.6", "PyYAML>=6.0.1", "rich>=13.7.0", "litellm>=1.22.3", "tiktoken>=0.5.2", "llm-utils>=0.2.5"] description = "Explains and proposes fixes for compile-time errors for many programming languages." readme = "README.md" requires-python = ">=3.7" From e3208ab59cd7b94f00d42adc8c1a63cdb605204c Mon Sep 17 00:00:00 2001 From: Emery Berger Date: Mon, 5 Feb 2024 13:42:04 -0500 Subject: [PATCH 5/8] Added Amazon instructions. --- README.md | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index c5d5263..b761c08 100644 --- a/README.md +++ b/README.md @@ -17,9 +17,10 @@ Go, Java, LaTeX, PHP, Python, Ruby, Rust, Swift, and TypeScript. > **Note** > - > CWhy needs to be connected to an [OpenAI account](https://openai.com/api/). _Your account will need to have a - > positive balance for this to work_ ([check your balance](https://platform.openai.com/usage)). - > [Get a key here](https://platform.openai.com/api-keys). + > CWhy needs to be connected to an [OpenAI account](https://openai.com/api/) or an Amazon Web Services account. + > _Your account will need to have a + > positive balance for this to work_ ([check your OpenAI balance](https://platform.openai.com/usage)). + > [Get an OpenAI key here](https://platform.openai.com/api-keys). > > CWhy currently defaults to GPT-4, and falls back to GPT-3.5-turbo if a request error occurs. For the newest and best > model (GPT-4) to work, you need to have purchased at least $1 in credits (if your API account was created before @@ -28,13 +29,26 @@ Go, Java, LaTeX, PHP, Python, Ruby, Rust, Swift, and TypeScript. > Once you have an API key, set it as an environment variable called `OPENAI_API_KEY`. > > ```bash - > # On Linux/MacOs. 
+ > # On Linux/MacOS: > export OPENAI_API_KEY= > - > # On Windows. + > # On Windows: > $env:OPENAI_API_KEY= > ``` - + > + > **New**: CWhy now has alpha support for Amazon Bedrock, using the Claude model. + > To use Bedrock, you need to set three environment variables. + > + > ```bash + > # On Linux/MacOS: + > export AWS_ACCESS_KEY_ID= + > export AWS_SECRET_ACCESS_KEY= + > export AWS_REGION_NAME=us-west-2 + > ``` + > + > CWhy will automatically select which AI service to use (OpenAI or AWS Bedrock) + > when it detects that the appropriate environment variables have been set. + ``` python3 -m pip install cwhy ``` From 035089d3340a43aeeacf99d15853dc6a5d5cbd5a Mon Sep 17 00:00:00 2001 From: Emery Berger Date: Mon, 5 Feb 2024 13:44:40 -0500 Subject: [PATCH 6/8] More clarity for key setting. --- src/cwhy/cwhy.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/cwhy/cwhy.py b/src/cwhy/cwhy.py index 03e4742..eaa5ec1 100755 --- a/src/cwhy/cwhy.py +++ b/src/cwhy/cwhy.py @@ -22,7 +22,8 @@ def print_key_info(): print() print("OpenAI:") print(" You can get a key here: https://platform.openai.com/api-keys") - print(" Set the environment variable OPENAI_API_KEY to your key value.") + print(" Set the environment variable OPENAI_API_KEY to your key value:") + print(" export OPENAI_API_KEY=") print() print("Bedrock:") print(" To use Bedrock, you need an AWS account.") From 237c0cd4f72d7ce5360cf32a1aaaf967c46a8472 Mon Sep 17 00:00:00 2001 From: Emery Berger Date: Mon, 5 Feb 2024 13:47:31 -0500 Subject: [PATCH 7/8] Ignore warning.
--- src/cwhy/cwhy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cwhy/cwhy.py b/src/cwhy/cwhy.py index eaa5ec1..8c55e32 100755 --- a/src/cwhy/cwhy.py +++ b/src/cwhy/cwhy.py @@ -4,7 +4,7 @@ import subprocess import sys -import litellm +import litellm # type: ignore import llm_utils litellm.set_verbose=False From 4dbd79ed2fefb85104c51b186366ccf3cbee91d3 Mon Sep 17 00:00:00 2001 From: Emery Berger Date: Mon, 5 Feb 2024 13:54:02 -0500 Subject: [PATCH 8/8] Added setuptools. --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 9c75d94..1f9bfdb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,7 +11,7 @@ authors = [ { name="Bryce Adelstein Lelbach", email="brycelelbach@gmail.com" } ] -dependencies = [ "openai>=1.3.6", "PyYAML>=6.0.1", "rich>=13.7.0", "litellm>=1.22.3", "tiktoken>=0.5.2", "llm-utils>=0.2.5"] +dependencies = [ "openai>=1.3.6", "PyYAML>=6.0.1", "rich>=13.7.0", "litellm>=1.22.3", "tiktoken>=0.5.2", "llm-utils>=0.2.5", "setuptools>=68.2.2"] description = "Explains and proposes fixes for compile-time errors for many programming languages." readme = "README.md" requires-python = ">=3.7"