Skip to content

Commit

Permalink
Merge remote-tracking branch 'origin/chore-update-registry1-weekly-bu…
Browse files Browse the repository at this point in the history
…ndle-0.13.0' into 835-upgrade-vllm-for-gptq-bfloat16-inferencing
  • Loading branch information
justinthelaw committed Sep 30, 2024
2 parents 46174ed + f502e06 commit af8c971
Show file tree
Hide file tree
Showing 34 changed files with 511 additions and 157 deletions.
46 changes: 32 additions & 14 deletions src/leapfrogai_api/backend/composer.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,12 +78,25 @@ async def create_chat_messages(
thread: Thread,
additional_instructions: str | None,
tool_resources: BetaThreadToolResources | None = None,
) -> tuple[list[ChatMessage], list[str]]:
) -> tuple[list[ChatMessage], SearchResponse]:
"""Create chat message list for consumption by the LLM backend.
Args:
request (RunCreateParamsRequest): The request object.
session (Session): The database session.
thread (Thread): The thread object.
additional_instructions (str | None): Additional instructions.
tool_resources (BetaThreadToolResources | None): The tool resources.
Returns:
tuple[list[ChatMessage], SearchResponse]: The chat messages and any RAG responses.
"""
# Get existing messages
thread_messages: list[Message] = await self.list_messages(thread.id, session)
rag_responses: SearchResponse = SearchResponse(data=[])

if len(thread_messages) == 0:
return [], []
return [], rag_responses

def sort_by_created_at(msg: Message):
return msg.created_at
Expand Down Expand Up @@ -125,7 +138,6 @@ def sort_by_created_at(msg: Message):
chat_messages.extend(chat_thread_messages)

# 4 - The RAG results are appended behind the user's query
file_ids: set[str] = set()
if request.can_use_rag(tool_resources) and chat_thread_messages:
rag_message: str = "Here are relevant docs needed to reply:\n"

Expand All @@ -138,22 +150,22 @@ def sort_by_created_at(msg: Message):
vector_store_ids: list[str] = cast(list[str], file_search.vector_store_ids)

for vector_store_id in vector_store_ids:
rag_responses: SearchResponse = await query_service.query_rag(
rag_responses = await query_service.query_rag(
query=query_message.content_as_str(),
vector_store_id=vector_store_id,
)

# Insert the RAG response messages just before the user's query
for rag_response in rag_responses.data:
file_ids.add(rag_response.file_id)
response_with_instructions: str = f"{rag_response.content}"
rag_message += f"{response_with_instructions}\n"

chat_messages.insert(
len(chat_messages) - 1, # Insert right before the user message
ChatMessage(role="user", content=rag_message),
) # TODO: Should this go in user or something else like function?
)

return chat_messages, list(file_ids)
return chat_messages, rag_responses

async def generate_message_for_thread(
self,
Expand Down Expand Up @@ -182,7 +194,7 @@ async def generate_message_for_thread(
else:
tool_resources = None

chat_messages, file_ids = await self.create_chat_messages(
chat_messages, rag_responses = await self.create_chat_messages(
request, session, thread, additional_instructions, tool_resources
)

Expand All @@ -204,13 +216,15 @@ async def generate_message_for_thread(

choice: ChatChoice = cast(ChatChoice, chat_response.choices[0])

message = from_text_to_message(choice.message.content_as_str(), file_ids)
message: Message = from_text_to_message(
text=choice.message.content_as_str(), search_responses=rag_responses
)

create_message_request = CreateMessageRequest(
role=message.role,
content=message.content,
attachments=message.attachments,
metadata=message.metadata.__dict__ if message.metadata else None,
metadata=vars(message.metadata),
)

await create_message_request.create_message(
Expand Down Expand Up @@ -249,7 +263,7 @@ async def stream_generate_message_for_thread(
else:
tool_resources = None

chat_messages, file_ids = await self.create_chat_messages(
chat_messages, rag_responses = await self.create_chat_messages(
request, session, thread, additional_instructions, tool_resources
)

Expand All @@ -274,13 +288,15 @@ async def stream_generate_message_for_thread(
yield "\n\n"

# Create an empty message
new_message: Message = from_text_to_message("", [])
new_message: Message = from_text_to_message(
text="", search_responses=SearchResponse(data=[])
)

create_message_request = CreateMessageRequest(
role=new_message.role,
content=new_message.content,
attachments=new_message.attachments,
metadata=new_message.metadata.__dict__ if new_message.metadata else None,
metadata=vars(new_message.metadata),
)

new_message = await create_message_request.create_message(
Expand Down Expand Up @@ -319,7 +335,9 @@ async def stream_generate_message_for_thread(
yield "\n\n"
index += 1

new_message.content = from_text_to_message(response, file_ids).content
new_message.content = from_text_to_message(
text=response, search_responses=rag_responses
).content
new_message.created_at = int(time.time())

crud_message = CRUDMessage(db=session)
Expand Down
47 changes: 35 additions & 12 deletions src/leapfrogai_api/backend/converters.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
from openai.types.beta import AssistantStreamEvent
from openai.types.beta.assistant_stream_event import ThreadMessageDelta
from openai.types.beta.threads.file_citation_annotation import FileCitation
from openai.types.beta.threads.file_path_annotation import FilePathAnnotation
from openai.types.beta.threads import (
MessageContentPartParam,
MessageContent,
Expand All @@ -17,6 +18,9 @@
FileCitationAnnotation,
)

from leapfrogai_api.typedef.vectorstores.search_types import SearchResponse
from leapfrogai_api.typedef.common import MetadataObject


def from_assistant_stream_event_to_str(stream_event: AssistantStreamEvent):
return f"event: {stream_event.event}\ndata: {stream_event.data.model_dump_json()}"
Expand Down Expand Up @@ -44,24 +48,41 @@ def from_content_param_to_content(
)


def from_text_to_message(text: str, file_ids: list[str]) -> Message:
all_file_ids: str = ""
def from_text_to_message(text: str, search_responses: SearchResponse | None) -> Message:
"""Loads text and RAG search responses into a Message object
for file_id in file_ids:
all_file_ids += f" [{file_id}]"
Args:
text: The text to load into the message
search_responses: The RAG search responses to load into the message
message_content: TextContentBlock = TextContentBlock(
text=Text(
annotations=[
Returns:
The OpenAI compliant Message object
"""

all_file_ids: str = ""
all_vector_ids: list[str] = []
annotations: list[FileCitationAnnotation | FilePathAnnotation] = []

if search_responses:
for search_response in search_responses.data:
all_file_ids += f"[{search_response.file_id}]"
all_vector_ids.append(search_response.id)
file_name = search_response.metadata.get("source", "source")
annotations.append(
FileCitationAnnotation(
text=f"[{file_id}]",
file_citation=FileCitation(file_id=file_id, quote=""),
text=f"【4:0†{file_name}】", # TODO: What should these numbers be? https://github.com/defenseunicorns/leapfrogai/issues/1110
file_citation=FileCitation(
file_id=search_response.file_id, quote=search_response.content
),
start_index=0,
end_index=0,
type="file_citation",
)
for file_id in file_ids
],
)

message_content: TextContentBlock = TextContentBlock(
text=Text(
annotations=annotations,
value=text + all_file_ids,
),
type="text",
Expand All @@ -75,7 +96,9 @@ def from_text_to_message(text: str, file_ids: list[str]) -> Message:
thread_id="",
content=[message_content],
role="assistant",
metadata=None,
metadata=MetadataObject(
vector_ids=all_vector_ids.__str__(),
),
)

return new_message
Expand Down
2 changes: 2 additions & 0 deletions src/leapfrogai_api/backend/rag/index.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,8 @@ async def index_file(self, vector_store_id: str, file_id: str) -> VectorStoreFil
temp_file.write(file_bytes)
temp_file.seek(0)
documents = await load_file(temp_file.name)
for document in documents:
document.metadata["source"] = file_object.filename
chunks = await split(documents)

if len(chunks) == 0:
Expand Down
35 changes: 25 additions & 10 deletions src/leapfrogai_api/data/crud_vector_content.py
Original file line number Diff line number Diff line change
@@ -1,20 +1,11 @@
"""CRUD Operations for VectorStore."""

from pydantic import BaseModel
from supabase import AClient as AsyncClient
from leapfrogai_api.data.crud_base import get_user_id
import ast
from leapfrogai_api.typedef.vectorstores import SearchItem, SearchResponse
from leapfrogai_api.backend.constants import TOP_K


class Vector(BaseModel):
id: str = ""
vector_store_id: str
file_id: str
content: str
metadata: dict
embedding: list[float]
from leapfrogai_api.typedef.vectorstores import Vector


class CRUDVectorContent:
Expand Down Expand Up @@ -65,6 +56,30 @@ async def add_vectors(self, object_: list[Vector]) -> list[Vector]:
except Exception as e:
raise e

async def get_vector(self, vector_id: str) -> Vector:
    """Fetch a single vector row by its primary key.

    Args:
        vector_id (str): The ID of the vector row to retrieve.

    Returns:
        Vector: The vector object built from the database row.
    """
    result, _count = (
        await self.db.table(self.table_name)
        .select("*")
        .eq("id", vector_id)
        .single()
        .execute()
    )

    # The client returns a (key, payload) pair; the payload is the row dict.
    _, row = result

    # Embeddings may come back serialized as a string; normalize to list[float].
    embedding = row["embedding"]
    if isinstance(embedding, str):
        embedding = self.string_to_float_list(embedding)

    return Vector(
        id=row["id"],
        vector_store_id=row["vector_store_id"],
        file_id=row["file_id"],
        content=row["content"],
        metadata=row["metadata"],
        embedding=embedding,
    )

async def delete_vectors(self, vector_store_id: str, file_id: str) -> bool:
"""Delete a vector store file by its ID."""
data, _count = (
Expand Down
22 changes: 22 additions & 0 deletions src/leapfrogai_api/routers/leapfrogai/vector_stores.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
from leapfrogai_api.backend.rag.query import QueryService
from leapfrogai_api.typedef.vectorstores import SearchResponse
from leapfrogai_api.routers.supabase_session import Session
from leapfrogai_api.data.crud_vector_content import CRUDVectorContent, Vector
from leapfrogai_api.backend.constants import TOP_K

router = APIRouter(
Expand Down Expand Up @@ -36,3 +37,24 @@ async def search(
vector_store_id=vector_store_id,
k=k,
)


@router.get("/vector/{vector_id}")
async def get_vector(
    session: Session,
    vector_id: str,
) -> Vector:
    """
    Get a specific vector by its ID.

    Args:
        session (Session): The database session.
        vector_id (str): The ID of the vector.

    Returns:
        Vector: The vector object.
    """
    return await CRUDVectorContent(db=session).get_vector(vector_id=vector_id)
5 changes: 4 additions & 1 deletion src/leapfrogai_api/typedef/__init__.py
Original file line number Diff line number Diff line change
@@ -1 +1,4 @@
from .common import Usage as Usage
from .common import (
Usage as Usage,
MetadataObject as MetadataObject,
)
11 changes: 11 additions & 0 deletions src/leapfrogai_api/typedef/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,17 @@
from leapfrogai_api.backend.constants import DEFAULT_MAX_COMPLETION_TOKENS


class MetadataObject:
    """Generic attribute bag that can be serialized back to a dict via vars().

    Unknown attributes resolve to None instead of raising AttributeError,
    mirroring dict.get() semantics for callers probing optional metadata.
    """

    def __init__(self, **entries):
        # Store every keyword argument as an instance attribute.
        self.__dict__.update(entries)

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails; fall back to None.
        return self.__dict__.get(name)


class Usage(BaseModel):
"""Usage object."""

Expand Down
1 change: 1 addition & 0 deletions src/leapfrogai_api/typedef/vectorstores/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
ListVectorStoresResponse as ListVectorStoresResponse,
)
from .search_types import (
Vector as Vector,
SearchItem as SearchItem,
SearchResponse as SearchResponse,
)
9 changes: 9 additions & 0 deletions src/leapfrogai_api/typedef/vectorstores/search_types.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,15 @@
from pydantic import BaseModel, Field


class Vector(BaseModel):
    """A single embedded chunk of a file stored in a vector store."""

    id: str = ""  # row ID; empty until assigned by the database
    vector_store_id: str  # parent vector store this chunk belongs to
    file_id: str  # source file the content chunk was extracted from
    content: str  # raw text of the chunk that was embedded
    metadata: dict  # chunk metadata; may include a "source" filename key
    embedding: list[float]  # embedding vector computed for the content


class SearchItem(BaseModel):
"""Object representing a single item in a search result."""

Expand Down
11 changes: 11 additions & 0 deletions src/leapfrogai_ui/src/app.css
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,17 @@
scrollbar-color: #4b5563 #1f2937;
}

/* Override TailwindCSS default Preflight styles for lists in messages */
#message-content-container {
ul {
margin: revert;
padding: revert;
li {
list-style: square;
}
}
}

/*TODO - can we get rid of some of these?*/
@layer utilities {
.content {
Expand Down
18 changes: 10 additions & 8 deletions src/leapfrogai_ui/src/lib/components/Message.svelte
Original file line number Diff line number Diff line change
Expand Up @@ -178,14 +178,16 @@
{#if message.role !== 'user' && !messageText}
<MessagePendingSkeleton size="sm" class="mt-4" darkColor="bg-gray-500" />
{:else}
<!--eslint-disable-next-line svelte/no-at-html-tags -- We use DomPurity to sanitize the code snippet-->
{@html DOMPurify.sanitize(md.render(messageText), {
CUSTOM_ELEMENT_HANDLING: {
tagNameCheck: /^code-block$/,
attributeNameCheck: /^(code|language)$/,
allowCustomizedBuiltInElements: false
}
})}
<div id="message-content-container">
<!--eslint-disable-next-line svelte/no-at-html-tags -- We use DOMPurify to sanitize the code snippet-->
{@html DOMPurify.sanitize(md.render(messageText), {
CUSTOM_ELEMENT_HANDLING: {
tagNameCheck: /^code-block$/,
attributeNameCheck: /^(code|language)$/,
allowCustomizedBuiltInElements: false
}
})}
</div>
<div class="flex flex-col items-start">
{#each getCitations(message, $page.data.files) as { component: Component, props }}
<svelte:component this={Component} {...props} />
Expand Down
Loading

0 comments on commit af8c971

Please sign in to comment.