diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 5e6a0ab7de77..49ba6da1b292 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- "core": "0.0.21"
+ "core": "0.0.22"
}
\ No newline at end of file
diff --git a/README.md b/README.md
index d58c863dc747..c877bc27579a 100644
--- a/README.md
+++ b/README.md
@@ -20,7 +20,7 @@ Quivr, helps you build your second brain, utilizes the power of GenerativeAI to
->We take care of the RAG so you can focus on your product. Simply install quivr-core and add it to your project. You can now ingest your files and ask questions.*
+>We take care of the RAG so you can focus on your product. Simply install quivr-core and add it to your project. You can now ingest your files and ask questions.
-**We will be improving the RAG and adding more features everything, stay tuned!**
+**We will be improving the RAG and adding more features, stay tuned!**
This is the core of Quivr, the brain of Quivr.com.
@@ -53,7 +53,7 @@ Ensure you have the following installed:
- **Step 2**: Create a RAG with 5 lines of code
-```python
+ ```python
import tempfile
from quivr_core import Brain
@@ -72,7 +72,7 @@ Ensure you have the following installed:
"what is gold? asnwer in french"
)
print("answer:", answer)
-```
+ ```
## Examples
diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md
index bc168bfa099d..1970224221f7 100644
--- a/core/CHANGELOG.md
+++ b/core/CHANGELOG.md
@@ -1,5 +1,12 @@
# Changelog
+## [0.0.22](https://github.com/QuivrHQ/quivr/compare/core-0.0.21...core-0.0.22) (2024-10-21)
+
+
+### Features
+
+* **ask:** non-streaming now calls streaming ([#3409](https://github.com/QuivrHQ/quivr/issues/3409)) ([e71e46b](https://github.com/QuivrHQ/quivr/commit/e71e46bcdfbab0d583aef015604278343fd46c6f))
+
## [0.0.21](https://github.com/QuivrHQ/quivr/compare/core-0.0.20...core-0.0.21) (2024-10-21)
diff --git a/core/pyproject.toml b/core/pyproject.toml
index 5f6dd746ccb1..cd77db6bcc2f 100644
--- a/core/pyproject.toml
+++ b/core/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "quivr-core"
-version = "0.0.21"
+version = "0.0.22"
description = "Quivr core RAG package"
authors = [
{ name = "Stan Girard", email = "stan@quivr.app" }
diff --git a/core/quivr_core/brain/brain.py b/core/quivr_core/brain/brain.py
index 53eb6c6d7142..228814cde8f4 100644
--- a/core/quivr_core/brain/brain.py
+++ b/core/quivr_core/brain/brain.py
@@ -545,34 +545,28 @@ def ask(
print(answer.answer)
```
"""
- llm = self.llm
-
- # If you passed a different llm model we'll override the brain one
- if retrieval_config:
- if retrieval_config.llm_config != self.llm.get_config():
- llm = LLMEndpoint.from_config(config=retrieval_config.llm_config)
- else:
- retrieval_config = RetrievalConfig(llm_config=self.llm.get_config())
-
- if rag_pipeline is None:
- rag_pipeline = QuivrQARAGLangGraph
-
- rag_instance = rag_pipeline(
- retrieval_config=retrieval_config, llm=llm, vector_store=self.vector_db
- )
+ async def collect_streamed_response():
+ full_answer = ""
+ async for response in self.ask_streaming(
+ question=question,
+ retrieval_config=retrieval_config,
+ rag_pipeline=rag_pipeline,
+ list_files=list_files,
+ chat_history=chat_history
+ ):
+ full_answer += response.answer
+ return full_answer
+
+ # Run the async function in the event loop
+ loop = asyncio.get_event_loop()
+ full_answer = loop.run_until_complete(collect_streamed_response())
-        chat_history = self.default_chat if chat_history is None else chat_history
-        list_files = [] if list_files is None else list_files
-
-        parsed_response = rag_instance.answer(
-            question=question, history=chat_history, list_files=list_files
-        )
-
-        chat_history.append(HumanMessage(content=question))
-        chat_history.append(AIMessage(content=parsed_response.answer))
-
-        # Save answer to the chat history
-        return parsed_response
+        # ask_streaming already resolves the chat history and appends the
+        # question and the full answer to it, so we skip those appends here
+        # to avoid recording the exchange twice and simply return the
+        # final response.
+        return ParsedRAGResponse(answer=full_answer)
async def ask_streaming(
self,
@@ -635,3 +629,4 @@ async def ask_streaming(
chat_history.append(HumanMessage(content=question))
chat_history.append(AIMessage(content=full_answer))
yield response
+
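
The net effect of the `brain.py` hunk above: the synchronous `ask` no longer builds its own RAG pipeline; it drains `ask_streaming` and concatenates the chunks. Below is a minimal, self-contained sketch of that collect-a-stream pattern. The names are illustrative stand-ins, not Quivr APIs, and the sketch uses `asyncio.run` where `brain.py` drives a loop obtained via `asyncio.get_event_loop`:

```python
import asyncio
from typing import AsyncIterator


async def fake_ask_streaming(question: str) -> AsyncIterator[str]:
    # Stand-in for Brain.ask_streaming: yields partial answer chunks.
    for chunk in ("L'or ", "est ", "un ", "métal."):
        await asyncio.sleep(0)  # pretend each chunk arrives asynchronously
        yield chunk


def ask(question: str) -> str:
    """Non-streaming ask: collect every streamed chunk into one string."""

    async def collect() -> str:
        parts: list[str] = []
        async for chunk in fake_ask_streaming(question):
            parts.append(chunk)
        return "".join(parts)

    return asyncio.run(collect())


if __name__ == "__main__":
    print(ask("what is gold? answer in french"))  # L'or est un métal.
```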
diff --git a/core/quivr_core/config.py b/core/quivr_core/config.py
index 3cadbc9fcd1c..47b7f9ecd2b4 100644
--- a/core/quivr_core/config.py
+++ b/core/quivr_core/config.py
@@ -233,7 +233,7 @@ class LLMEndpointConfig(QuivrBaseConfig):
Attributes:
supplier (DefaultModelSuppliers): The LLM provider (default: OPENAI).
- model (str): The specific model to use (default: "gpt-3.5-turbo-0125").
+ model (str): The specific model to use (default: "gpt-4o").
context_length (int | None): The maximum context length for the model.
tokenizer_hub (str | None): The tokenizer to use for this model.
llm_base_url (str | None): Base URL for the LLM API.
@@ -247,7 +247,7 @@ class LLMEndpointConfig(QuivrBaseConfig):
"""
supplier: DefaultModelSuppliers = DefaultModelSuppliers.OPENAI
- model: str = "gpt-3.5-turbo-0125"
+ model: str = "gpt-4o"
context_length: int | None = None
tokenizer_hub: str | None = None
llm_base_url: str | None = None
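
With the `config.py` change above, a default-constructed endpoint config now targets `gpt-4o`. A quick sketch of what that means for callers (assuming `quivr-core` is installed and `LLMEndpointConfig` is importable from `quivr_core.config`, as the diff paths suggest):

```python
from quivr_core.config import LLMEndpointConfig

# No arguments: the new defaults apply.
config = LLMEndpointConfig()
assert config.model == "gpt-4o"  # was "gpt-3.5-turbo-0125" before this change

# Callers that pinned a model explicitly are unaffected by the new default.
pinned = LLMEndpointConfig(model="gpt-3.5-turbo-0125")
assert pinned.model == "gpt-3.5-turbo-0125"
```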
diff --git a/core/quivr_core/utils.py b/core/quivr_core/utils.py
index 0ff992ec4589..3239db10456a 100644
--- a/core/quivr_core/utils.py
+++ b/core/quivr_core/utils.py
@@ -26,7 +26,7 @@ def model_supports_function_calling(model_name: str):
"gpt-4",
"gpt-4-1106-preview",
"gpt-4-0613",
- "gpt-3.5-turbo-0125",
+ "gpt-4o",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-0613",
"gpt-4-0125-preview",
diff --git a/core/tests/rag_config.yaml b/core/tests/rag_config.yaml
index 3a4a5214ca27..3c43d0cba099 100644
--- a/core/tests/rag_config.yaml
+++ b/core/tests/rag_config.yaml
@@ -27,7 +27,7 @@ retrieval_config:
supplier: "openai"
# The model to use for the LLM for the given supplier
- model: "gpt-3.5-turbo-0125"
+ model: "gpt-4o"
max_input_tokens: 2000
diff --git a/core/tests/rag_config_workflow.yaml b/core/tests/rag_config_workflow.yaml
index a1750cf61554..f566299cb0a3 100644
--- a/core/tests/rag_config_workflow.yaml
+++ b/core/tests/rag_config_workflow.yaml
@@ -40,7 +40,7 @@ retrieval_config:
supplier: "openai"
# The model to use for the LLM for the given supplier
- model: "gpt-3.5-turbo-0125"
+ model: "gpt-4o"
max_input_tokens: 2000
diff --git a/core/tests/test_config.py b/core/tests/test_config.py
index 1cf06dc5228d..c7d2320cbd9d 100644
--- a/core/tests/test_config.py
+++ b/core/tests/test_config.py
@@ -5,7 +5,7 @@ def test_default_llm_config():
config = LLMEndpointConfig()
assert config.model_dump(exclude={"llm_api_key"}) == LLMEndpointConfig(
- model="gpt-3.5-turbo-0125",
+ model="gpt-4o",
llm_base_url=None,
llm_api_key=None,
max_input_tokens=2000,
diff --git a/docs/docs/index.md b/docs/docs/index.md
index dd90130b4957..d8ee34909903 100644
--- a/docs/docs/index.md
+++ b/docs/docs/index.md
@@ -1,41 +1,68 @@
# Welcome to Quivr Documentation
-Welcome to the documentation of Quivr! This is the place where you'll find help, guidance and support for collaborative software development. Whether you're involved in an open-source community or a large software team, these resources should get you up and running quickly!
+Quivr helps you build your second brain, using the power of Generative AI to be your personal assistant!
-[Quivr](https://quivr.app) is your **Second Brain** that can act as your **personal assistant**. Quivr is a platform that enables the creation of AI assistants, referred to as "Brain". These assistants are designed with specialized capabilities. Some can connect to specific data sources, allowing users to interact directly with the data. Others serve as specialized tools for particular use cases, powered by Rag technology. These tools process specific inputs to generate practical outputs, such as summaries, translations, and more.
+## Key Features 🎯
-## Quick Links
+- **Opinionated RAG**: We created an opinionated, fast, and efficient RAG so you can focus on your product
+- **LLMs**: Quivr works with any LLM; you can use it with OpenAI, Anthropic, Mistral, Gemma, etc.
+- **Any File**: Quivr works with any file type (PDF, TXT, Markdown, etc.), and you can even add your own parsers.
+- **Customize your RAG**: Quivr lets you customize your RAG with internet search, tools, and more.
+- **Integration with Megaparse**: Quivr works with [Megaparse](https://github.com/quivrhq/megaparse), so you can ingest your files with Megaparse and use the RAG with Quivr.
-- [Video Installation](https://dub.sh/quivr-demo)
+>We take care of the RAG so you can focus on your product. Simply install quivr-core and add it to your project. You can now ingest your files and ask questions.
-!!! note
- **Our goal** is to make Quivr the **best personal assistant** that is powered by your knowledge and your applications 🔥
+**We will be improving the RAG and adding more features, stay tuned!**
-## What does it do?
-
-
-
+This is the core of Quivr, the brain of Quivr.com.
-## How to get started? 👀
+
-1. **Create an account**: Go to [Quivr](https://quivr.app) and create an account.
-2. **Create a Brain**: Let us guide you to create your first brain!
-3. **Feed this Brain**: Add documentation and/or URLs to feed your brain.
-4. **Ask Questions to your Brain**: Ask your Brain questions about the knowledge that you provide.
+## Getting Started 🚀
-## Empowering Innovation with Foundation Models & Generative AI
+You can find everything in the [documentation](https://core.quivr.app/).
-As a Leader in AI, Quivr leverages Foundation Models and Generative AI to empower businesses to achieve gains through Innovation.
+### Prerequisites 📋
-- 50k+ users
-- 6k+ companies
-- 35k+ github stars
-- Top 100 open-source
+Ensure you have the following installed:
+
+- Python 3.10 or newer
+
+### 30-Second Installation 💽
+
+
+- **Step 1**: Install the package
+
+
+
+ ```bash
+ pip install quivr-core # Check that the installation worked
+ ```
+
+
+- **Step 2**: Create a RAG with 5 lines of code
+
+ ```python
+ import tempfile
+
+ from quivr_core import Brain
+
+ if __name__ == "__main__":
+ with tempfile.NamedTemporaryFile(mode="w", suffix=".txt") as temp_file:
+ temp_file.write("Gold is a liquid of blue-like colour.")
+ temp_file.flush()
+
+ brain = Brain.from_files(
+ name="test_brain",
+ file_paths=[temp_file.name],
+ )
+
+ answer = brain.ask(
+        "what is gold? answer in french"
+ )
+ print("answer:", answer)
+ ```
diff --git a/docs/docs/installation.md b/docs/docs/installation.md
deleted file mode 100644
index 4f7cbfa546ca..000000000000
--- a/docs/docs/installation.md
+++ /dev/null
@@ -1,5 +0,0 @@
-## Installation
-
-```bash
-pip install quivr-core
-```
\ No newline at end of file
diff --git a/docs/docs/quickstart.md b/docs/docs/quickstart.md
index 8ea302544929..25a67343e123 100644
--- a/docs/docs/quickstart.md
+++ b/docs/docs/quickstart.md
@@ -70,13 +70,13 @@ brain = Brain.from_files(name = "my smart brain",
-Note : [Embeddings](https://python.langchain.com/docs/integrations/text_embedding/) is a langchain class that lets you chose from a large variety of embedding models. Please check out the following docs to know the panel of models you can try.
+Note: [Embeddings](https://python.langchain.com/docs/integrations/text_embedding/) is a langchain class that lets you choose from a large variety of embedding models. Please check out the linked docs to see the range of models you can try.
-## Launch with Streamlit
+## Launch with Chainlit
-If you want to quickly launch an interface with streamlit, you can simply do at the root of the project :
+If you want to quickly launch an interface with Chainlit, simply run the following at the root of the project:
```bash
-cd examples/chatbot /
-rye sync /
-rye run streamlit run examples/chatbot/main.py /
+cd examples/chatbot
+rye sync
+rye run chainlit run chainlit.py
```
-For more detail, go in [examples/chatbot/chainlit.md](https://github.com/QuivrHQ/quivr/tree/main/examples/chatbot)
+For more details, see [examples/chatbot/chainlit.md](https://github.com/QuivrHQ/quivr/tree/main/examples/chatbot)
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 66753d288ae8..5614b8bc6942 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -45,6 +45,7 @@ theme:
name: Switch to system preference
plugins:
+ - search
- mkdocstrings:
default_handler: python
handlers:
@@ -58,7 +59,6 @@ plugins:
nav:
- Home:
- index.md
- - installation.md
- quickstart.md
- Brain:
- brain/index.md
diff --git a/examples/chatbot/requirements-dev.lock b/examples/chatbot/requirements-dev.lock
index 8a856b6ccbed..69268a3f99f7 100644
--- a/examples/chatbot/requirements-dev.lock
+++ b/examples/chatbot/requirements-dev.lock
@@ -17,8 +17,6 @@ aiohappyeyeballs==2.4.3
aiohttp==3.10.10
# via langchain
# via langchain-community
- # via llama-index-core
- # via llama-index-legacy
aiosignal==1.3.1
# via aiohttp
alembic==1.13.3
@@ -29,8 +27,6 @@ annotated-types==0.7.0
# via pydantic
anthropic==0.36.1
# via langchain-anthropic
-antlr4-python3-runtime==4.9.3
- # via omegaconf
anyio==4.6.2.post1
# via anthropic
# via asyncer
@@ -45,11 +41,6 @@ attrs==23.2.0
# via jsonschema
# via referencing
# via sagemaker
-backoff==2.2.1
- # via unstructured
-beautifulsoup4==4.12.3
- # via llama-index-readers-file
- # via unstructured
bidict==0.23.1
# via python-socketio
blinker==1.8.2
@@ -69,38 +60,23 @@ certifi==2024.8.30
# via httpcore
# via httpx
# via requests
-cffi==1.17.1
- # via cryptography
chainlit==1.2.0
-chardet==5.2.0
- # via unstructured
charset-normalizer==3.4.0
- # via pdfminer-six
# via requests
chevron==0.14.0
# via literalai
click==8.1.7
# via chainlit
# via flask
- # via llama-parse
# via mlflow-skinny
- # via nltk
- # via python-oxmsg
# via uvicorn
cloudpickle==2.2.1
# via mlflow-skinny
# via sagemaker
-cobble==0.1.4
- # via mammoth
cohere==5.11.0
# via langchain-cohere
-coloredlogs==15.0.1
- # via onnxruntime
contourpy==1.3.0
# via matplotlib
-cryptography==43.0.1
- # via pdfminer-six
- # via unstructured-client
cycler==0.12.1
# via matplotlib
databricks-sdk==0.34.0
@@ -108,41 +84,22 @@ databricks-sdk==0.34.0
dataclasses-json==0.6.7
# via chainlit
# via langchain-community
- # via llama-index-core
- # via llama-index-legacy
- # via unstructured
defusedxml==0.7.1
# via langchain-anthropic
deprecated==1.2.14
- # via llama-index-core
- # via llama-index-legacy
# via opentelemetry-api
# via opentelemetry-exporter-otlp-proto-grpc
# via opentelemetry-exporter-otlp-proto-http
# via opentelemetry-semantic-conventions
- # via pikepdf
dill==0.3.9
# via multiprocess
# via pathos
-dirtyjson==1.0.8
- # via llama-index-core
- # via llama-index-legacy
distro==1.9.0
# via anthropic
# via openai
docker==7.1.0
# via mlflow
# via sagemaker
-docx2txt==0.8
- # via quivr-core
-effdet==0.4.1
- # via unstructured
-emoji==2.14.0
- # via unstructured
-et-xmlfile==1.1.0
- # via openpyxl
-eval-type-backport==0.2.0
- # via unstructured-client
faiss-cpu==1.9.0
# via quivr-core
fastapi==0.112.4
@@ -151,46 +108,27 @@ fastavro==1.9.7
# via cohere
filelock==3.16.1
# via huggingface-hub
- # via torch
# via transformers
- # via triton
filetype==1.2.0
# via chainlit
- # via unstructured
-fire==0.7.0
- # via pdf2docx
flask==3.0.3
# via mlflow
-flatbuffers==24.3.25
- # via onnxruntime
fonttools==4.54.1
# via matplotlib
- # via pdf2docx
frozenlist==1.4.1
# via aiohttp
# via aiosignal
fsspec==2024.9.0
# via huggingface-hub
- # via llama-index-core
- # via llama-index-legacy
- # via torch
gitdb==4.0.11
# via gitpython
gitpython==3.1.43
# via mlflow-skinny
-google-api-core==2.21.0
- # via google-cloud-vision
google-auth==2.35.0
# via databricks-sdk
- # via google-api-core
- # via google-cloud-vision
-google-cloud-vision==3.7.4
- # via unstructured
google-pasta==0.2.0
# via sagemaker
googleapis-common-protos==1.65.0
- # via google-api-core
- # via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
# via opentelemetry-exporter-otlp-proto-http
graphene==3.3
@@ -203,11 +141,7 @@ graphql-relay==3.2.0
greenlet==3.1.1
# via sqlalchemy
grpcio==1.67.0
- # via google-api-core
- # via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
-grpcio-status==1.62.3
- # via google-api-core
gunicorn==23.0.0
# via mlflow
h11==0.14.0
@@ -223,22 +157,14 @@ httpx==0.27.2
# via langgraph-sdk
# via langsmith
# via literalai
- # via llama-cloud
- # via llama-index-core
- # via llama-index-legacy
# via openai
# via quivr-core
- # via unstructured-client
httpx-sse==0.4.0
# via cohere
# via langgraph-sdk
huggingface-hub==0.25.2
- # via timm
# via tokenizers
# via transformers
- # via unstructured-inference
-humanfriendly==10.0
- # via coloredlogs
idna==3.10
# via anyio
# via httpx
@@ -249,14 +175,11 @@ importlib-metadata==6.11.0
# via opentelemetry-api
# via sagemaker
# via sagemaker-core
-iopath==0.1.10
- # via layoutparser
itsdangerous==2.2.0
# via flask
jinja2==3.1.4
# via flask
# via mlflow
- # via torch
jiter==0.6.1
# via anthropic
# via openai
@@ -264,12 +187,9 @@ jmespath==1.0.1
# via boto3
# via botocore
joblib==1.4.2
- # via nltk
# via scikit-learn
jsonpatch==1.33
# via langchain-core
-jsonpath-python==1.0.6
- # via unstructured-client
jsonpointer==3.0.0
# via jsonpatch
jsonschema==4.23.0
@@ -281,14 +201,13 @@ kiwisolver==1.4.7
# via matplotlib
langchain==0.2.16
# via langchain-community
- # via megaparse
# via quivr-core
langchain-anthropic==0.1.23
# via quivr-core
langchain-cohere==0.2.4
+ # via quivr-core
langchain-community==0.2.17
# via langchain-experimental
- # via megaparse
# via quivr-core
langchain-core==0.2.41
# via langchain
@@ -300,16 +219,13 @@ langchain-core==0.2.41
# via langchain-text-splitters
# via langgraph
# via langgraph-checkpoint
- # via megaparse
# via quivr-core
langchain-experimental==0.0.65
# via langchain-cohere
langchain-openai==0.1.25
- # via megaparse
+ # via quivr-core
langchain-text-splitters==0.2.4
# via langchain
-langdetect==1.0.9
- # via unstructured
langgraph==0.2.38
# via quivr-core
langgraph-checkpoint==2.0.1
@@ -320,74 +236,14 @@ langsmith==0.1.135
# via langchain
# via langchain-community
# via langchain-core
-layoutparser==0.3.4
- # via unstructured-inference
lazify==0.4.0
# via chainlit
literalai==0.0.607
# via chainlit
-llama-cloud==0.1.2
- # via llama-index-indices-managed-llama-cloud
-llama-index==0.11.18
- # via megaparse
-llama-index-agent-openai==0.3.4
- # via llama-index
- # via llama-index-program-openai
-llama-index-cli==0.3.1
- # via llama-index
-llama-index-core==0.11.18
- # via llama-index
- # via llama-index-agent-openai
- # via llama-index-cli
- # via llama-index-embeddings-openai
- # via llama-index-indices-managed-llama-cloud
- # via llama-index-llms-openai
- # via llama-index-multi-modal-llms-openai
- # via llama-index-program-openai
- # via llama-index-question-gen-openai
- # via llama-index-readers-file
- # via llama-index-readers-llama-parse
- # via llama-parse
-llama-index-embeddings-openai==0.2.5
- # via llama-index
- # via llama-index-cli
-llama-index-indices-managed-llama-cloud==0.4.0
- # via llama-index
-llama-index-legacy==0.9.48.post3
- # via llama-index
-llama-index-llms-openai==0.2.15
- # via llama-index
- # via llama-index-agent-openai
- # via llama-index-cli
- # via llama-index-multi-modal-llms-openai
- # via llama-index-program-openai
- # via llama-index-question-gen-openai
-llama-index-multi-modal-llms-openai==0.2.2
- # via llama-index
-llama-index-program-openai==0.2.0
- # via llama-index
- # via llama-index-question-gen-openai
-llama-index-question-gen-openai==0.2.0
- # via llama-index
-llama-index-readers-file==0.2.2
- # via llama-index
-llama-index-readers-llama-parse==0.3.0
- # via llama-index
-llama-parse==0.5.9
- # via llama-index-readers-llama-parse
- # via megaparse
-lxml==5.3.0
- # via pikepdf
- # via python-docx
- # via python-pptx
- # via unstructured
mako==1.3.5
# via alembic
-mammoth==1.8.0
- # via megaparse
markdown==3.7
# via mlflow
- # via unstructured
markdown-it-py==3.0.0
# via rich
markupsafe==3.0.1
@@ -398,20 +254,14 @@ marshmallow==3.22.0
# via dataclasses-json
matplotlib==3.9.2
# via mlflow
- # via pycocotools
- # via unstructured-inference
mdurl==0.1.2
# via markdown-it-py
-megaparse==0.0.31
- # via quivr-core
mlflow==2.17.0
# via sagemaker-mlflow
mlflow-skinny==2.17.0
# via mlflow
mock==4.0.3
# via sagemaker-core
-mpmath==1.3.0
- # via sympy
msgpack==1.1.0
# via langgraph-checkpoint
multidict==6.1.0
@@ -423,94 +273,22 @@ mypy-extensions==1.0.0
# via typing-inspect
nest-asyncio==1.6.0
# via chainlit
- # via llama-index-core
- # via llama-index-legacy
- # via unstructured-client
-networkx==3.4.1
- # via llama-index-core
- # via llama-index-legacy
- # via torch
- # via unstructured
-nltk==3.9.1
- # via llama-index
- # via llama-index-core
- # via llama-index-legacy
- # via unstructured
numpy==1.26.4
# via chainlit
# via contourpy
# via faiss-cpu
# via langchain
# via langchain-community
- # via layoutparser
- # via llama-index-core
- # via llama-index-legacy
# via matplotlib
# via mlflow
- # via onnx
- # via onnxruntime
- # via opencv-python
- # via opencv-python-headless
# via pandas
- # via pdf2docx
# via pyarrow
- # via pycocotools
# via sagemaker
# via scikit-learn
# via scipy
- # via torchvision
# via transformers
- # via unstructured
-nvidia-cublas-cu12==12.1.3.1
- # via nvidia-cudnn-cu12
- # via nvidia-cusolver-cu12
- # via torch
-nvidia-cuda-cupti-cu12==12.1.105
- # via torch
-nvidia-cuda-nvrtc-cu12==12.1.105
- # via torch
-nvidia-cuda-runtime-cu12==12.1.105
- # via torch
-nvidia-cudnn-cu12==9.1.0.70
- # via torch
-nvidia-cufft-cu12==11.0.2.54
- # via torch
-nvidia-curand-cu12==10.3.2.106
- # via torch
-nvidia-cusolver-cu12==11.4.5.107
- # via torch
-nvidia-cusparse-cu12==12.1.0.106
- # via nvidia-cusolver-cu12
- # via torch
-nvidia-nccl-cu12==2.20.5
- # via torch
-nvidia-nvjitlink-cu12==12.6.77
- # via nvidia-cusolver-cu12
- # via nvidia-cusparse-cu12
-nvidia-nvtx-cu12==12.1.105
- # via torch
-olefile==0.47
- # via python-oxmsg
-omegaconf==2.3.0
- # via effdet
-onnx==1.17.0
- # via unstructured
- # via unstructured-inference
-onnxruntime==1.19.2
- # via unstructured-inference
openai==1.51.2
# via langchain-openai
- # via llama-index-agent-openai
- # via llama-index-embeddings-openai
- # via llama-index-legacy
- # via llama-index-llms-openai
-opencv-python==4.10.0.84
- # via layoutparser
- # via unstructured-inference
-opencv-python-headless==4.10.0.84
- # via pdf2docx
-openpyxl==3.1.5
- # via unstructured
opentelemetry-api==1.27.0
# via mlflow-skinny
# via opentelemetry-exporter-otlp-proto-grpc
@@ -554,78 +332,35 @@ packaging==23.2
# via marshmallow
# via matplotlib
# via mlflow-skinny
- # via onnxruntime
- # via pikepdf
# via sagemaker
# via transformers
- # via unstructured-pytesseract
pandas==2.2.3
# via langchain-cohere
- # via layoutparser
- # via llama-index-legacy
- # via llama-index-readers-file
# via mlflow
# via sagemaker
- # via unstructured
parameterized==0.9.0
# via cohere
pathos==0.3.3
# via sagemaker
-pdf2docx==0.5.8
- # via megaparse
-pdf2image==1.17.0
- # via layoutparser
- # via unstructured
-pdfminer-six==20231228
- # via pdfplumber
- # via unstructured
-pdfplumber==0.11.4
- # via layoutparser
- # via megaparse
-pi-heif==0.18.0
- # via unstructured
-pikepdf==9.3.0
- # via unstructured
pillow==11.0.0
- # via layoutparser
- # via llama-index-core
# via matplotlib
- # via pdf2image
- # via pdfplumber
- # via pi-heif
- # via pikepdf
- # via python-pptx
- # via torchvision
- # via unstructured-pytesseract
platformdirs==4.3.6
# via sagemaker
# via sagemaker-core
-portalocker==2.10.1
- # via iopath
pox==0.3.5
# via pathos
ppft==1.7.6.9
# via pathos
propcache==0.2.0
# via yarl
-proto-plus==1.24.0
- # via google-api-core
- # via google-cloud-vision
protobuf==4.25.5
- # via google-api-core
- # via google-cloud-vision
# via googleapis-common-protos
- # via grpcio-status
# via mlflow-skinny
- # via onnx
- # via onnxruntime
# via opentelemetry-proto
- # via proto-plus
# via sagemaker
# via transformers
psutil==6.0.0
# via sagemaker
- # via unstructured
pyarrow==17.0.0
# via mlflow
pyasn1==0.6.1
@@ -633,12 +368,6 @@ pyasn1==0.6.1
# via rsa
pyasn1-modules==0.4.1
# via google-auth
-pycocotools==2.0.8
- # via effdet
-pycparser==2.22
- # via cffi
-pycryptodome==3.21.0
- # via megaparse
pydantic==2.9.2
# via anthropic
# via chainlit
@@ -648,13 +377,9 @@ pydantic==2.9.2
# via langchain-core
# via langsmith
# via literalai
- # via llama-cloud
- # via llama-index-core
# via openai
# via quivr-core
# via sagemaker-core
- # via sqlmodel
- # via unstructured-client
pydantic-core==2.23.4
# via cohere
# via pydantic
@@ -662,44 +387,18 @@ pygments==2.18.0
# via rich
pyjwt==2.9.0
# via chainlit
-pymupdf==1.24.11
- # via pdf2docx
-pypandoc==1.14
- # via unstructured
pyparsing==3.2.0
# via matplotlib
-pypdf==4.3.1
- # via llama-index-readers-file
- # via unstructured
- # via unstructured-client
-pypdfium2==4.30.0
- # via pdfplumber
python-dateutil==2.8.2
# via botocore
# via matplotlib
# via pandas
- # via unstructured-client
-python-docx==1.1.2
- # via megaparse
- # via pdf2docx
- # via unstructured
python-dotenv==1.0.1
# via chainlit
- # via megaparse
python-engineio==4.10.1
# via python-socketio
-python-iso639==2024.4.27
- # via unstructured
-python-magic==0.4.27
- # via unstructured
python-multipart==0.0.9
# via chainlit
- # via unstructured-inference
-python-oxmsg==0.0.1
- # via unstructured
-python-pptx==1.0.2
- # via megaparse
- # via unstructured
python-socketio==5.11.4
# via chainlit
pytz==2024.2
@@ -709,46 +408,33 @@ pyyaml==6.0.2
# via langchain
# via langchain-community
# via langchain-core
- # via layoutparser
- # via llama-index-core
# via mlflow-skinny
- # via omegaconf
# via sagemaker
# via sagemaker-core
- # via timm
# via transformers
-quivr-core==0.0.18
-rapidfuzz==3.10.0
- # via unstructured
- # via unstructured-inference
+quivr-core @ file:///${PROJECT_ROOT}/../../core
referencing==0.35.1
# via jsonschema
# via jsonschema-specifications
regex==2024.9.11
- # via nltk
# via tiktoken
# via transformers
requests==2.32.3
# via cohere
# via databricks-sdk
# via docker
- # via google-api-core
# via huggingface-hub
# via langchain
# via langchain-community
# via langsmith
- # via llama-index-core
- # via llama-index-legacy
# via mlflow-skinny
# via opentelemetry-exporter-otlp-proto-http
# via requests-toolbelt
# via sagemaker
# via tiktoken
# via transformers
- # via unstructured
requests-toolbelt==1.0.0
# via langsmith
- # via unstructured-client
rich==13.9.2
# via quivr-core
# via sagemaker-core
@@ -760,7 +446,6 @@ rsa==4.9
s3transfer==0.10.3
# via boto3
safetensors==0.4.5
- # via timm
# via transformers
sagemaker==2.232.2
# via cohere
@@ -773,7 +458,6 @@ schema==0.7.7
scikit-learn==1.5.2
# via mlflow
scipy==1.14.1
- # via layoutparser
# via mlflow
# via scikit-learn
sentencepiece==0.2.0
@@ -784,7 +468,6 @@ simple-websocket==1.1.0
# via python-engineio
six==1.16.0
# via google-pasta
- # via langdetect
# via python-dateutil
smdebug-rulesconfig==1.0.1
# via sagemaker
@@ -795,80 +478,44 @@ sniffio==1.3.1
# via anyio
# via httpx
# via openai
-soupsieve==2.6
- # via beautifulsoup4
sqlalchemy==2.0.36
# via alembic
# via langchain
# via langchain-community
- # via llama-index-core
- # via llama-index-legacy
# via mlflow
- # via sqlmodel
-sqlmodel==0.0.22
sqlparse==0.5.1
# via mlflow-skinny
starlette==0.37.2
# via chainlit
# via fastapi
-striprtf==0.0.26
- # via llama-index-readers-file
-sympy==1.13.3
- # via onnxruntime
- # via torch
syncer==2.0.3
# via chainlit
tabulate==0.9.0
# via langchain-cohere
- # via unstructured
tblib==3.0.0
# via sagemaker
tenacity==8.5.0
# via langchain
# via langchain-community
# via langchain-core
- # via llama-index-core
- # via llama-index-legacy
-termcolor==2.5.0
- # via fire
threadpoolctl==3.5.0
# via scikit-learn
tiktoken==0.8.0
# via langchain-openai
- # via llama-index-core
- # via llama-index-legacy
# via quivr-core
-timm==1.0.11
- # via effdet
- # via unstructured-inference
tokenizers==0.20.1
# via anthropic
# via cohere
# via transformers
tomli==2.0.2
# via chainlit
-torch==2.4.1
- # via effdet
- # via timm
- # via torchvision
- # via unstructured-inference
-torchvision==0.19.1
- # via effdet
- # via timm
tqdm==4.66.5
# via huggingface-hub
- # via iopath
- # via llama-index-core
- # via nltk
# via openai
# via sagemaker
# via transformers
- # via unstructured
transformers==4.45.2
# via quivr-core
- # via unstructured-inference
-triton==3.0.0
- # via torch
types-pyyaml==6.0.12.20240917
# via quivr-core
types-requests==2.32.0.20241016
@@ -879,37 +526,17 @@ typing-extensions==4.12.2
# via cohere
# via fastapi
# via huggingface-hub
- # via iopath
# via langchain-core
- # via llama-index-core
- # via llama-index-legacy
# via openai
# via opentelemetry-sdk
# via pydantic
# via pydantic-core
- # via python-docx
- # via python-oxmsg
- # via python-pptx
# via sqlalchemy
- # via torch
# via typing-inspect
- # via unstructured
typing-inspect==0.9.0
# via dataclasses-json
- # via llama-index-core
- # via llama-index-legacy
- # via unstructured-client
tzdata==2024.2
# via pandas
-unstructured==0.15.14
- # via megaparse
- # via quivr-core
-unstructured-client==0.26.1
- # via unstructured
-unstructured-inference==0.7.36
- # via unstructured
-unstructured-pytesseract==0.3.13
- # via unstructured
uptrace==1.27.0
# via chainlit
urllib3==2.2.3
@@ -926,15 +553,9 @@ werkzeug==3.0.4
# via flask
wrapt==1.16.0
# via deprecated
- # via llama-index-core
# via opentelemetry-instrumentation
- # via unstructured
wsproto==1.2.0
# via simple-websocket
-xlrd==2.0.1
- # via unstructured
-xlsxwriter==3.2.0
- # via python-pptx
yarl==1.15.4
# via aiohttp
zipp==3.20.2
diff --git a/examples/chatbot/requirements.lock b/examples/chatbot/requirements.lock
index 8a856b6ccbed..69268a3f99f7 100644
--- a/examples/chatbot/requirements.lock
+++ b/examples/chatbot/requirements.lock
@@ -17,8 +17,6 @@ aiohappyeyeballs==2.4.3
aiohttp==3.10.10
# via langchain
# via langchain-community
- # via llama-index-core
- # via llama-index-legacy
aiosignal==1.3.1
# via aiohttp
alembic==1.13.3
@@ -29,8 +27,6 @@ annotated-types==0.7.0
# via pydantic
anthropic==0.36.1
# via langchain-anthropic
-antlr4-python3-runtime==4.9.3
- # via omegaconf
anyio==4.6.2.post1
# via anthropic
# via asyncer
@@ -45,11 +41,6 @@ attrs==23.2.0
# via jsonschema
# via referencing
# via sagemaker
-backoff==2.2.1
- # via unstructured
-beautifulsoup4==4.12.3
- # via llama-index-readers-file
- # via unstructured
bidict==0.23.1
# via python-socketio
blinker==1.8.2
@@ -69,38 +60,23 @@ certifi==2024.8.30
# via httpcore
# via httpx
# via requests
-cffi==1.17.1
- # via cryptography
chainlit==1.2.0
-chardet==5.2.0
- # via unstructured
charset-normalizer==3.4.0
- # via pdfminer-six
# via requests
chevron==0.14.0
# via literalai
click==8.1.7
# via chainlit
# via flask
- # via llama-parse
# via mlflow-skinny
- # via nltk
- # via python-oxmsg
# via uvicorn
cloudpickle==2.2.1
# via mlflow-skinny
# via sagemaker
-cobble==0.1.4
- # via mammoth
cohere==5.11.0
# via langchain-cohere
-coloredlogs==15.0.1
- # via onnxruntime
contourpy==1.3.0
# via matplotlib
-cryptography==43.0.1
- # via pdfminer-six
- # via unstructured-client
cycler==0.12.1
# via matplotlib
databricks-sdk==0.34.0
@@ -108,41 +84,22 @@ databricks-sdk==0.34.0
dataclasses-json==0.6.7
# via chainlit
# via langchain-community
- # via llama-index-core
- # via llama-index-legacy
- # via unstructured
defusedxml==0.7.1
# via langchain-anthropic
deprecated==1.2.14
- # via llama-index-core
- # via llama-index-legacy
# via opentelemetry-api
# via opentelemetry-exporter-otlp-proto-grpc
# via opentelemetry-exporter-otlp-proto-http
# via opentelemetry-semantic-conventions
- # via pikepdf
dill==0.3.9
# via multiprocess
# via pathos
-dirtyjson==1.0.8
- # via llama-index-core
- # via llama-index-legacy
distro==1.9.0
# via anthropic
# via openai
docker==7.1.0
# via mlflow
# via sagemaker
-docx2txt==0.8
- # via quivr-core
-effdet==0.4.1
- # via unstructured
-emoji==2.14.0
- # via unstructured
-et-xmlfile==1.1.0
- # via openpyxl
-eval-type-backport==0.2.0
- # via unstructured-client
faiss-cpu==1.9.0
# via quivr-core
fastapi==0.112.4
@@ -151,46 +108,27 @@ fastavro==1.9.7
# via cohere
filelock==3.16.1
# via huggingface-hub
- # via torch
# via transformers
- # via triton
filetype==1.2.0
# via chainlit
- # via unstructured
-fire==0.7.0
- # via pdf2docx
flask==3.0.3
# via mlflow
-flatbuffers==24.3.25
- # via onnxruntime
fonttools==4.54.1
# via matplotlib
- # via pdf2docx
frozenlist==1.4.1
# via aiohttp
# via aiosignal
fsspec==2024.9.0
# via huggingface-hub
- # via llama-index-core
- # via llama-index-legacy
- # via torch
gitdb==4.0.11
# via gitpython
gitpython==3.1.43
# via mlflow-skinny
-google-api-core==2.21.0
- # via google-cloud-vision
google-auth==2.35.0
# via databricks-sdk
- # via google-api-core
- # via google-cloud-vision
-google-cloud-vision==3.7.4
- # via unstructured
google-pasta==0.2.0
# via sagemaker
googleapis-common-protos==1.65.0
- # via google-api-core
- # via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
# via opentelemetry-exporter-otlp-proto-http
graphene==3.3
@@ -203,11 +141,7 @@ graphql-relay==3.2.0
greenlet==3.1.1
# via sqlalchemy
grpcio==1.67.0
- # via google-api-core
- # via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
-grpcio-status==1.62.3
- # via google-api-core
gunicorn==23.0.0
# via mlflow
h11==0.14.0
@@ -223,22 +157,14 @@ httpx==0.27.2
# via langgraph-sdk
# via langsmith
# via literalai
- # via llama-cloud
- # via llama-index-core
- # via llama-index-legacy
# via openai
# via quivr-core
- # via unstructured-client
httpx-sse==0.4.0
# via cohere
# via langgraph-sdk
huggingface-hub==0.25.2
- # via timm
# via tokenizers
# via transformers
- # via unstructured-inference
-humanfriendly==10.0
- # via coloredlogs
idna==3.10
# via anyio
# via httpx
@@ -249,14 +175,11 @@ importlib-metadata==6.11.0
# via opentelemetry-api
# via sagemaker
# via sagemaker-core
-iopath==0.1.10
- # via layoutparser
itsdangerous==2.2.0
# via flask
jinja2==3.1.4
# via flask
# via mlflow
- # via torch
jiter==0.6.1
# via anthropic
# via openai
@@ -264,12 +187,9 @@ jmespath==1.0.1
# via boto3
# via botocore
joblib==1.4.2
- # via nltk
# via scikit-learn
jsonpatch==1.33
# via langchain-core
-jsonpath-python==1.0.6
- # via unstructured-client
jsonpointer==3.0.0
# via jsonpatch
jsonschema==4.23.0
@@ -281,14 +201,13 @@ kiwisolver==1.4.7
# via matplotlib
langchain==0.2.16
# via langchain-community
- # via megaparse
# via quivr-core
langchain-anthropic==0.1.23
# via quivr-core
langchain-cohere==0.2.4
+ # via quivr-core
langchain-community==0.2.17
# via langchain-experimental
- # via megaparse
# via quivr-core
langchain-core==0.2.41
# via langchain
@@ -300,16 +219,13 @@ langchain-core==0.2.41
# via langchain-text-splitters
# via langgraph
# via langgraph-checkpoint
- # via megaparse
# via quivr-core
langchain-experimental==0.0.65
# via langchain-cohere
langchain-openai==0.1.25
- # via megaparse
+ # via quivr-core
langchain-text-splitters==0.2.4
# via langchain
-langdetect==1.0.9
- # via unstructured
langgraph==0.2.38
# via quivr-core
langgraph-checkpoint==2.0.1
@@ -320,74 +236,14 @@ langsmith==0.1.135
# via langchain
# via langchain-community
# via langchain-core
-layoutparser==0.3.4
- # via unstructured-inference
lazify==0.4.0
# via chainlit
literalai==0.0.607
# via chainlit
-llama-cloud==0.1.2
- # via llama-index-indices-managed-llama-cloud
-llama-index==0.11.18
- # via megaparse
-llama-index-agent-openai==0.3.4
- # via llama-index
- # via llama-index-program-openai
-llama-index-cli==0.3.1
- # via llama-index
-llama-index-core==0.11.18
- # via llama-index
- # via llama-index-agent-openai
- # via llama-index-cli
- # via llama-index-embeddings-openai
- # via llama-index-indices-managed-llama-cloud
- # via llama-index-llms-openai
- # via llama-index-multi-modal-llms-openai
- # via llama-index-program-openai
- # via llama-index-question-gen-openai
- # via llama-index-readers-file
- # via llama-index-readers-llama-parse
- # via llama-parse
-llama-index-embeddings-openai==0.2.5
- # via llama-index
- # via llama-index-cli
-llama-index-indices-managed-llama-cloud==0.4.0
- # via llama-index
-llama-index-legacy==0.9.48.post3
- # via llama-index
-llama-index-llms-openai==0.2.15
- # via llama-index
- # via llama-index-agent-openai
- # via llama-index-cli
- # via llama-index-multi-modal-llms-openai
- # via llama-index-program-openai
- # via llama-index-question-gen-openai
-llama-index-multi-modal-llms-openai==0.2.2
- # via llama-index
-llama-index-program-openai==0.2.0
- # via llama-index
- # via llama-index-question-gen-openai
-llama-index-question-gen-openai==0.2.0
- # via llama-index
-llama-index-readers-file==0.2.2
- # via llama-index
-llama-index-readers-llama-parse==0.3.0
- # via llama-index
-llama-parse==0.5.9
- # via llama-index-readers-llama-parse
- # via megaparse
-lxml==5.3.0
- # via pikepdf
- # via python-docx
- # via python-pptx
- # via unstructured
mako==1.3.5
# via alembic
-mammoth==1.8.0
- # via megaparse
markdown==3.7
# via mlflow
- # via unstructured
markdown-it-py==3.0.0
# via rich
markupsafe==3.0.1
@@ -398,20 +254,14 @@ marshmallow==3.22.0
# via dataclasses-json
matplotlib==3.9.2
# via mlflow
- # via pycocotools
- # via unstructured-inference
mdurl==0.1.2
# via markdown-it-py
-megaparse==0.0.31
- # via quivr-core
mlflow==2.17.0
# via sagemaker-mlflow
mlflow-skinny==2.17.0
# via mlflow
mock==4.0.3
# via sagemaker-core
-mpmath==1.3.0
- # via sympy
msgpack==1.1.0
# via langgraph-checkpoint
multidict==6.1.0
@@ -423,94 +273,22 @@ mypy-extensions==1.0.0
# via typing-inspect
nest-asyncio==1.6.0
# via chainlit
- # via llama-index-core
- # via llama-index-legacy
- # via unstructured-client
-networkx==3.4.1
- # via llama-index-core
- # via llama-index-legacy
- # via torch
- # via unstructured
-nltk==3.9.1
- # via llama-index
- # via llama-index-core
- # via llama-index-legacy
- # via unstructured
numpy==1.26.4
# via chainlit
# via contourpy
# via faiss-cpu
# via langchain
# via langchain-community
- # via layoutparser
- # via llama-index-core
- # via llama-index-legacy
# via matplotlib
# via mlflow
- # via onnx
- # via onnxruntime
- # via opencv-python
- # via opencv-python-headless
# via pandas
- # via pdf2docx
# via pyarrow
- # via pycocotools
# via sagemaker
# via scikit-learn
# via scipy
- # via torchvision
# via transformers
- # via unstructured
-nvidia-cublas-cu12==12.1.3.1
- # via nvidia-cudnn-cu12
- # via nvidia-cusolver-cu12
- # via torch
-nvidia-cuda-cupti-cu12==12.1.105
- # via torch
-nvidia-cuda-nvrtc-cu12==12.1.105
- # via torch
-nvidia-cuda-runtime-cu12==12.1.105
- # via torch
-nvidia-cudnn-cu12==9.1.0.70
- # via torch
-nvidia-cufft-cu12==11.0.2.54
- # via torch
-nvidia-curand-cu12==10.3.2.106
- # via torch
-nvidia-cusolver-cu12==11.4.5.107
- # via torch
-nvidia-cusparse-cu12==12.1.0.106
- # via nvidia-cusolver-cu12
- # via torch
-nvidia-nccl-cu12==2.20.5
- # via torch
-nvidia-nvjitlink-cu12==12.6.77
- # via nvidia-cusolver-cu12
- # via nvidia-cusparse-cu12
-nvidia-nvtx-cu12==12.1.105
- # via torch
-olefile==0.47
- # via python-oxmsg
-omegaconf==2.3.0
- # via effdet
-onnx==1.17.0
- # via unstructured
- # via unstructured-inference
-onnxruntime==1.19.2
- # via unstructured-inference
openai==1.51.2
# via langchain-openai
- # via llama-index-agent-openai
- # via llama-index-embeddings-openai
- # via llama-index-legacy
- # via llama-index-llms-openai
-opencv-python==4.10.0.84
- # via layoutparser
- # via unstructured-inference
-opencv-python-headless==4.10.0.84
- # via pdf2docx
-openpyxl==3.1.5
- # via unstructured
opentelemetry-api==1.27.0
# via mlflow-skinny
# via opentelemetry-exporter-otlp-proto-grpc
@@ -554,78 +332,35 @@ packaging==23.2
# via marshmallow
# via matplotlib
# via mlflow-skinny
- # via onnxruntime
- # via pikepdf
# via sagemaker
# via transformers
- # via unstructured-pytesseract
pandas==2.2.3
# via langchain-cohere
- # via layoutparser
- # via llama-index-legacy
- # via llama-index-readers-file
# via mlflow
# via sagemaker
- # via unstructured
parameterized==0.9.0
# via cohere
pathos==0.3.3
# via sagemaker
-pdf2docx==0.5.8
- # via megaparse
-pdf2image==1.17.0
- # via layoutparser
- # via unstructured
-pdfminer-six==20231228
- # via pdfplumber
- # via unstructured
-pdfplumber==0.11.4
- # via layoutparser
- # via megaparse
-pi-heif==0.18.0
- # via unstructured
-pikepdf==9.3.0
- # via unstructured
pillow==11.0.0
- # via layoutparser
- # via llama-index-core
# via matplotlib
- # via pdf2image
- # via pdfplumber
- # via pi-heif
- # via pikepdf
- # via python-pptx
- # via torchvision
- # via unstructured-pytesseract
platformdirs==4.3.6
# via sagemaker
# via sagemaker-core
-portalocker==2.10.1
- # via iopath
pox==0.3.5
# via pathos
ppft==1.7.6.9
# via pathos
propcache==0.2.0
# via yarl
-proto-plus==1.24.0
- # via google-api-core
- # via google-cloud-vision
protobuf==4.25.5
- # via google-api-core
- # via google-cloud-vision
# via googleapis-common-protos
- # via grpcio-status
# via mlflow-skinny
- # via onnx
- # via onnxruntime
# via opentelemetry-proto
- # via proto-plus
# via sagemaker
# via transformers
psutil==6.0.0
# via sagemaker
- # via unstructured
pyarrow==17.0.0
# via mlflow
pyasn1==0.6.1
@@ -633,12 +368,6 @@ pyasn1==0.6.1
# via rsa
pyasn1-modules==0.4.1
# via google-auth
-pycocotools==2.0.8
- # via effdet
-pycparser==2.22
- # via cffi
-pycryptodome==3.21.0
- # via megaparse
pydantic==2.9.2
# via anthropic
# via chainlit
@@ -648,13 +377,9 @@ pydantic==2.9.2
# via langchain-core
# via langsmith
# via literalai
- # via llama-cloud
- # via llama-index-core
# via openai
# via quivr-core
# via sagemaker-core
- # via sqlmodel
- # via unstructured-client
pydantic-core==2.23.4
# via cohere
# via pydantic
@@ -662,44 +387,18 @@ pygments==2.18.0
# via rich
pyjwt==2.9.0
# via chainlit
-pymupdf==1.24.11
- # via pdf2docx
-pypandoc==1.14
- # via unstructured
pyparsing==3.2.0
# via matplotlib
-pypdf==4.3.1
- # via llama-index-readers-file
- # via unstructured
- # via unstructured-client
-pypdfium2==4.30.0
- # via pdfplumber
python-dateutil==2.8.2
# via botocore
# via matplotlib
# via pandas
- # via unstructured-client
-python-docx==1.1.2
- # via megaparse
- # via pdf2docx
- # via unstructured
python-dotenv==1.0.1
# via chainlit
- # via megaparse
python-engineio==4.10.1
# via python-socketio
-python-iso639==2024.4.27
- # via unstructured
-python-magic==0.4.27
- # via unstructured
python-multipart==0.0.9
# via chainlit
- # via unstructured-inference
-python-oxmsg==0.0.1
- # via unstructured
-python-pptx==1.0.2
- # via megaparse
- # via unstructured
python-socketio==5.11.4
# via chainlit
pytz==2024.2
@@ -709,46 +408,33 @@ pyyaml==6.0.2
# via langchain
# via langchain-community
# via langchain-core
- # via layoutparser
- # via llama-index-core
# via mlflow-skinny
- # via omegaconf
# via sagemaker
# via sagemaker-core
- # via timm
# via transformers
-quivr-core==0.0.18
-rapidfuzz==3.10.0
- # via unstructured
- # via unstructured-inference
+quivr-core @ file:///${PROJECT_ROOT}/../../core
referencing==0.35.1
# via jsonschema
# via jsonschema-specifications
regex==2024.9.11
- # via nltk
# via tiktoken
# via transformers
requests==2.32.3
# via cohere
# via databricks-sdk
# via docker
- # via google-api-core
# via huggingface-hub
# via langchain
# via langchain-community
# via langsmith
- # via llama-index-core
- # via llama-index-legacy
# via mlflow-skinny
# via opentelemetry-exporter-otlp-proto-http
# via requests-toolbelt
# via sagemaker
# via tiktoken
# via transformers
- # via unstructured
requests-toolbelt==1.0.0
# via langsmith
- # via unstructured-client
rich==13.9.2
# via quivr-core
# via sagemaker-core
@@ -760,7 +446,6 @@ rsa==4.9
s3transfer==0.10.3
# via boto3
safetensors==0.4.5
- # via timm
# via transformers
sagemaker==2.232.2
# via cohere
@@ -773,7 +458,6 @@ schema==0.7.7
scikit-learn==1.5.2
# via mlflow
scipy==1.14.1
- # via layoutparser
# via mlflow
# via scikit-learn
sentencepiece==0.2.0
@@ -784,7 +468,6 @@ simple-websocket==1.1.0
# via python-engineio
six==1.16.0
# via google-pasta
- # via langdetect
# via python-dateutil
smdebug-rulesconfig==1.0.1
# via sagemaker
@@ -795,80 +478,44 @@ sniffio==1.3.1
# via anyio
# via httpx
# via openai
-soupsieve==2.6
- # via beautifulsoup4
sqlalchemy==2.0.36
# via alembic
# via langchain
# via langchain-community
- # via llama-index-core
- # via llama-index-legacy
# via mlflow
- # via sqlmodel
-sqlmodel==0.0.22
sqlparse==0.5.1
# via mlflow-skinny
starlette==0.37.2
# via chainlit
# via fastapi
-striprtf==0.0.26
- # via llama-index-readers-file
-sympy==1.13.3
- # via onnxruntime
- # via torch
syncer==2.0.3
# via chainlit
tabulate==0.9.0
# via langchain-cohere
- # via unstructured
tblib==3.0.0
# via sagemaker
tenacity==8.5.0
# via langchain
# via langchain-community
# via langchain-core
- # via llama-index-core
- # via llama-index-legacy
-termcolor==2.5.0
- # via fire
threadpoolctl==3.5.0
# via scikit-learn
tiktoken==0.8.0
# via langchain-openai
- # via llama-index-core
- # via llama-index-legacy
# via quivr-core
-timm==1.0.11
- # via effdet
- # via unstructured-inference
tokenizers==0.20.1
# via anthropic
# via cohere
# via transformers
tomli==2.0.2
# via chainlit
-torch==2.4.1
- # via effdet
- # via timm
- # via torchvision
- # via unstructured-inference
-torchvision==0.19.1
- # via effdet
- # via timm
tqdm==4.66.5
# via huggingface-hub
- # via iopath
- # via llama-index-core
- # via nltk
# via openai
# via sagemaker
# via transformers
- # via unstructured
transformers==4.45.2
# via quivr-core
- # via unstructured-inference
-triton==3.0.0
- # via torch
types-pyyaml==6.0.12.20240917
# via quivr-core
types-requests==2.32.0.20241016
@@ -879,37 +526,17 @@ typing-extensions==4.12.2
# via cohere
# via fastapi
# via huggingface-hub
- # via iopath
# via langchain-core
- # via llama-index-core
- # via llama-index-legacy
# via openai
# via opentelemetry-sdk
# via pydantic
# via pydantic-core
- # via python-docx
- # via python-oxmsg
- # via python-pptx
# via sqlalchemy
- # via torch
# via typing-inspect
- # via unstructured
typing-inspect==0.9.0
# via dataclasses-json
- # via llama-index-core
- # via llama-index-legacy
- # via unstructured-client
tzdata==2024.2
# via pandas
-unstructured==0.15.14
- # via megaparse
- # via quivr-core
-unstructured-client==0.26.1
- # via unstructured
-unstructured-inference==0.7.36
- # via unstructured
-unstructured-pytesseract==0.3.13
- # via unstructured
uptrace==1.27.0
# via chainlit
urllib3==2.2.3
@@ -926,15 +553,9 @@ werkzeug==3.0.4
# via flask
wrapt==1.16.0
# via deprecated
- # via llama-index-core
# via opentelemetry-instrumentation
- # via unstructured
wsproto==1.2.0
# via simple-websocket
-xlrd==2.0.1
- # via unstructured
-xlsxwriter==3.2.0
- # via python-pptx
yarl==1.15.4
# via aiohttp
zipp==3.20.2
diff --git a/examples/simple_question/requirements-dev.lock b/examples/simple_question/requirements-dev.lock
index dc66f67d6b68..2083a1da619b 100644
--- a/examples/simple_question/requirements-dev.lock
+++ b/examples/simple_question/requirements-dev.lock
@@ -20,13 +20,13 @@ aiosignal==1.3.1
# via aiohttp
annotated-types==0.7.0
# via pydantic
-anthropic==0.36.1
+anthropic==0.36.2
# via langchain-anthropic
anyio==4.6.2.post1
# via anthropic
# via httpx
# via openai
-attrs==23.2.0
+attrs==24.2.0
# via aiohttp
certifi==2024.8.30
# via httpcore
@@ -53,7 +53,7 @@ filelock==3.16.1
frozenlist==1.4.1
# via aiohttp
# via aiosignal
-fsspec==2024.9.0
+fsspec==2024.10.0
# via huggingface-hub
greenlet==3.1.1
# via sqlalchemy
@@ -71,7 +71,7 @@ httpx==0.27.2
httpx-sse==0.4.0
# via cohere
# via langgraph-sdk
-huggingface-hub==0.25.2
+huggingface-hub==0.26.1
# via tokenizers
# via transformers
idna==3.10
@@ -113,19 +113,19 @@ langchain-openai==0.1.25
# via quivr-core
langchain-text-splitters==0.2.4
# via langchain
-langgraph==0.2.38
+langgraph==0.2.39
# via quivr-core
langgraph-checkpoint==2.0.1
# via langgraph
langgraph-sdk==0.1.33
# via langgraph
-langsmith==0.1.135
+langsmith==0.1.136
# via langchain
# via langchain-community
# via langchain-core
markdown-it-py==3.0.0
# via rich
-marshmallow==3.22.0
+marshmallow==3.23.0
# via dataclasses-json
mdurl==0.1.2
# via markdown-it-py
@@ -144,7 +144,7 @@ numpy==1.26.4
# via transformers
openai==1.52.0
# via langchain-openai
-orjson==3.10.7
+orjson==3.10.9
# via langgraph-sdk
# via langsmith
packaging==24.1
@@ -159,7 +159,7 @@ parameterized==0.9.0
# via cohere
propcache==0.2.0
# via yarl
-protobuf==4.25.5
+protobuf==5.28.2
# via transformers
pydantic==2.9.2
# via anthropic
@@ -255,5 +255,5 @@ tzdata==2024.2
urllib3==2.2.3
# via requests
# via types-requests
-yarl==1.15.4
+yarl==1.15.5
# via aiohttp
diff --git a/examples/simple_question/requirements.lock b/examples/simple_question/requirements.lock
index dc66f67d6b68..2083a1da619b 100644
--- a/examples/simple_question/requirements.lock
+++ b/examples/simple_question/requirements.lock
@@ -20,13 +20,13 @@ aiosignal==1.3.1
# via aiohttp
annotated-types==0.7.0
# via pydantic
-anthropic==0.36.1
+anthropic==0.36.2
# via langchain-anthropic
anyio==4.6.2.post1
# via anthropic
# via httpx
# via openai
-attrs==23.2.0
+attrs==24.2.0
# via aiohttp
certifi==2024.8.30
# via httpcore
@@ -53,7 +53,7 @@ filelock==3.16.1
frozenlist==1.4.1
# via aiohttp
# via aiosignal
-fsspec==2024.9.0
+fsspec==2024.10.0
# via huggingface-hub
greenlet==3.1.1
# via sqlalchemy
@@ -71,7 +71,7 @@ httpx==0.27.2
httpx-sse==0.4.0
# via cohere
# via langgraph-sdk
-huggingface-hub==0.25.2
+huggingface-hub==0.26.1
# via tokenizers
# via transformers
idna==3.10
@@ -113,19 +113,19 @@ langchain-openai==0.1.25
# via quivr-core
langchain-text-splitters==0.2.4
# via langchain
-langgraph==0.2.38
+langgraph==0.2.39
# via quivr-core
langgraph-checkpoint==2.0.1
# via langgraph
langgraph-sdk==0.1.33
# via langgraph
-langsmith==0.1.135
+langsmith==0.1.136
# via langchain
# via langchain-community
# via langchain-core
markdown-it-py==3.0.0
# via rich
-marshmallow==3.22.0
+marshmallow==3.23.0
# via dataclasses-json
mdurl==0.1.2
# via markdown-it-py
@@ -144,7 +144,7 @@ numpy==1.26.4
# via transformers
openai==1.52.0
# via langchain-openai
-orjson==3.10.7
+orjson==3.10.9
# via langgraph-sdk
# via langsmith
packaging==24.1
@@ -159,7 +159,7 @@ parameterized==0.9.0
# via cohere
propcache==0.2.0
# via yarl
-protobuf==4.25.5
+protobuf==5.28.2
# via transformers
pydantic==2.9.2
# via anthropic
@@ -255,5 +255,5 @@ tzdata==2024.2
urllib3==2.2.3
# via requests
# via types-requests
-yarl==1.15.4
+yarl==1.15.5
# via aiohttp
diff --git a/examples/simple_question/simple_question.py b/examples/simple_question/simple_question.py
index 33537d45121d..4067635e9722 100644
--- a/examples/simple_question/simple_question.py
+++ b/examples/simple_question/simple_question.py
@@ -15,4 +15,4 @@
answer = brain.ask(
"what is gold? asnwer in french"
)
- print("answer QuivrQARAGLangGraph :", answer)
+ print("answer QuivrQARAGLangGraph :", answer.answer)
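
This last fix matters because `Brain.ask` returns a `ParsedRAGResponse` object, so printing it directly would show the whole response object rather than just the generated text. A self-contained micro-example of the same access pattern, using a stand-in dataclass in place of the real model:

```python
from dataclasses import dataclass


@dataclass
class ParsedRAGResponse:
    # Stand-in mirroring the attribute accessed in simple_question.py.
    answer: str


response = ParsedRAGResponse(answer="L'or est un métal précieux.")
print("answer QuivrQARAGLangGraph :", response.answer)
```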