Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add support for PaLM, Claude-2, Llama 2, Code Llama (100+ LLMs) #129

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions backend/config/utils.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
import openai
import litellm

def is_valid_api_key(openai_api_key: str):
openai.api_key = openai_api_key
litellm.api_key = openai_api_key
try:
openai.ChatCompletion.create(
litellm.completion(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."}
Expand Down
9 changes: 5 additions & 4 deletions backend/embeddings/utils.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from typing import List
from gpt4all import GPT4All
import openai
import litellm
import os


Expand All @@ -12,8 +13,8 @@ def openai_ask(prompt: str, context: str = None, pages: List[int] = None, questi
print(pages)
# TODO: make answer to same language
if openai_model.startswith("gpt"):
openai.api_key = openai_api_key
completion = openai.ChatCompletion.create(
litellm.api_key = openai_api_key
completion = litellm.completion(
model=f"{openai_model}",
messages=[
{"role": "user", "content": f"{prompt}"}
Expand All @@ -36,8 +37,8 @@ def openai_ask(prompt: str, context: str = None, pages: List[int] = None, questi

def openai_ask_no_aixplora_brain(question: str, openai_api_key: str = None, openai_model: str = "gpt-3.5-turbo"):
if openai_model.startswith("gpt"):
openai.api_key = openai_api_key
completion = openai.ChatCompletion.create(
litellm.api_key = openai_api_key
completion = litellm.completion(
model=f"{openai_model}",
messages=[
{"role": "user", "content": f"{question}"}
Expand Down
7 changes: 4 additions & 3 deletions backend/llm/summarize.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import openai
import litellm

from schemas.question import Document
from database.database import Database
Expand Down Expand Up @@ -72,7 +73,7 @@ def get_summary(self):
text_split = text[words_to_feed * i:words_to_feed * (i + 1)]
time.sleep(0.25)
if self.model.startswith("gpt"):
response = openai.ChatCompletion.create(
response = litellm.completion(
api_key=f"{self.openai_api_key}",
model=f"{self.openai_model}",
temperature=0.2,
Expand Down Expand Up @@ -100,7 +101,7 @@ def get_summary(self):
raise ("The summary is still too long. Please try again.")
count_token = num_tokens_from_string("".join(summary), "cl100k_base")
if self.model.startswith("gpt"):
response = openai.ChatCompletion.create(
response = litellm.completion(
api_key=f"{self.openai_api_key}",
model=f"{self.openai_model}",
temperature=0.2,
Expand All @@ -121,7 +122,7 @@ def get_summary(self):
return {"summary": response["choices"][0]["message"]["content"], "summary_list": "<hr>".join(summary)}
else:
if self.model.startswith("gpt"):
response = openai.ChatCompletion.create(
response = litellm.completion(
api_key=f"{self.openai_api_key}",
model=f"{self.openai_model}",
temperature=0.2,
Expand Down
1 change: 1 addition & 0 deletions backend/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ idna==3.4
iniconfig==2.0.0
joblib==1.2.0
langchain==0.0.178
litellm>=0.1.574
lxml==4.9.2
lz4==4.3.2
marshmallow==3.19.0
Expand Down