v0.11.x (#15398)
masci authored Aug 22, 2024
1 parent ef9a21c commit 78f9a11
Showing 953 changed files with 4,975 additions and 6,897 deletions.
2 changes: 0 additions & 2 deletions .github/workflows/build_package.yml
@@ -28,8 +28,6 @@ jobs:
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
-          cache: "poetry"
-          cache-dependency-path: "**/poetry.lock"
      - name: Install deps
        shell: bash
        run: poetry install
1 change: 1 addition & 0 deletions .gitignore
@@ -15,3 +15,4 @@ credentials.json
token.json
.python-version
.DS_Store
+/storage/
12 changes: 12 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,17 @@
# ChangeLog

+## [2024-08-22]
+
+### `llama-index-core` [0.11.0]
+
+- removed deprecated `ServiceContext` -- using this now will print an error with a link to the migration guide
+- removed deprecated `LLMPredictor` -- using this now will print an error, any existing LLM is a drop-in replacement
+- made `pandas` an optional dependency
+
+### `Everything Else`
+
+- bumped the minor version of every package to account for the new version of `llama-index-core`
+
## [2024-08-21]

### `llama-index-core` [0.10.68]
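For migrators, a minimal sketch of what the `ServiceContext` and `LLMPredictor` bullets above mean in practice, assuming llama-index-core 0.11 is installed; `MockLLM` and `MockEmbedding` stand in for whatever real models you use:

    from llama_index.core import Document, Settings, VectorStoreIndex
    from llama_index.core.embeddings import MockEmbedding
    from llama_index.core.llms import MockLLM

    # Pre-0.11 (removed; now errors with a link to the migration guide):
    #   service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
    #   index = VectorStoreIndex.from_documents(docs, service_context=service_context)

    # 0.11: configure once via Settings; components pick it up implicitly.
    Settings.llm = MockLLM()  # any LLM instance; no LLMPredictor wrapper needed
    Settings.embed_model = MockEmbedding(embed_dim=8)

    index = VectorStoreIndex.from_documents([Document(text="hello world")])
    print(index.as_query_engine().query("hello"))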
9 changes: 9 additions & 0 deletions docs/BUILD
@@ -0,0 +1,9 @@
+python_sources()
+
+poetry_requirements(
+    name="poetry",
+)
+
+python_requirements(
+    name="reqs",
+)
20 changes: 9 additions & 11 deletions llama-index-cli/llama_index/cli/rag/base.py
@@ -7,6 +7,7 @@
from typing import Any, Callable, Dict, Optional, Union, cast

from llama_index.core import (
+    Settings,
    SimpleDirectoryReader,
    VectorStoreIndex,
)
@@ -16,9 +17,8 @@
    StreamingResponse,
    Response,
)
-from llama_index.core.bridge.pydantic import BaseModel, Field, validator
+from llama_index.core.bridge.pydantic import BaseModel, Field, field_validator
from llama_index.core.chat_engine import CondenseQuestionChatEngine
-from llama_index.core.indices.service_context import ServiceContext
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.llms import LLM
from llama_index.core.query_engine import CustomQueryEngine
@@ -100,7 +100,7 @@ class RagCLI(BaseModel):
    class Config:
        arbitrary_types_allowed = True

-    @validator("query_pipeline", always=True)
+    @field_validator("query_pipeline", mode="before")
    def query_pipeline_from_ingestion_pipeline(
        cls, query_pipeline: Any, values: Dict[str, Any]
    ) -> Optional[QueryPipeline]:
@@ -127,15 +127,13 @@ def query_pipeline_from_ingestion_pipeline(
                embed_model = transformation
                break

-        service_context = ServiceContext.from_defaults(
-            llm=llm, embed_model=embed_model or "default"
-        )
+        Settings.llm = llm
+        Settings.embed_model = embed_model

        retriever = VectorStoreIndex.from_vector_store(
-            ingestion_pipeline.vector_store, service_context=service_context
+            ingestion_pipeline.vector_store,
        ).as_retriever(similarity_top_k=8)
-        response_synthesizer = CompactAndRefine(
-            service_context=service_context, streaming=True, verbose=verbose
-        )
+        response_synthesizer = CompactAndRefine(streaming=True, verbose=verbose)

        # define query pipeline
        query_pipeline = QueryPipeline(verbose=verbose)
@@ -151,7 +149,7 @@ def query_pipeline_from_ingestion_pipeline(
        query_pipeline.add_link("query", "summarizer", dest_key="query_str")
        return query_pipeline

-    @validator("chat_engine", always=True)
+    @field_validator("chat_engine", mode="before")
    def chat_engine_from_query_pipeline(
        cls, chat_engine: Any, values: Dict[str, Any]
    ) -> Optional[CondenseQuestionChatEngine]:
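The `validator` to `field_validator` edits above follow the stock Pydantic v1-to-v2 migration: `always=True` maps to `mode="before"`, and previously validated fields arrive through the validator's second, `ValidationInfo`-style argument rather than a plain `values` dict (whatever the parameter is named). A minimal sketch of the pattern with invented names; `PipelineConfig` is not from this codebase:

    from typing import Any, Optional
    from pydantic import BaseModel, ValidationInfo, field_validator

    class PipelineConfig(BaseModel):
        persist_dir: str
        collection_name: Optional[str] = None  # filled in by the validator below

        # v1: @validator("collection_name", always=True)
        @field_validator("collection_name", mode="before")
        @classmethod
        def _default_collection_name(cls, v: Any, info: ValidationInfo) -> str:
            # info.data holds the already-validated fields declared above this one
            return v or f"{info.data['persist_dir']}-collection"

    print(PipelineConfig(persist_dir="./storage").collection_name)  # ./storage-collection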
1 change: 0 additions & 1 deletion llama-index-cli/llama_index/cli/upgrade/mappings.json
@@ -1,6 +1,5 @@
{
  "StorageContext": "llama_index.core",
-  "ServiceContext": "llama_index.core",
  "ComposableGraph": "llama_index.core",
  "# indicesSummaryIndex": "llama_index.core",
  "VectorStoreIndex": "llama_index.core",
8 changes: 4 additions & 4 deletions llama-index-cli/pyproject.toml
@@ -32,13 +32,13 @@ maintainers = [
name = "llama-index-cli"
packages = [{include = "llama_index/"}]
readme = "README.md"
-version = "0.1.13"
+version = "0.3.0"

[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
-llama-index-core = "^0.10.11.post1"
-llama-index-embeddings-openai = "^0.1.1"
-llama-index-llms-openai = "^0.1.1"
+llama-index-core = "^0.11.0"
+llama-index-embeddings-openai = "^0.2.0"
+llama-index-llms-openai = "^0.2.0"

[tool.poetry.group.dev.dependencies]
black = {extras = ["jupyter"], version = "<=23.9.1,>=23.7.0"}
5 changes: 1 addition & 4 deletions llama-index-core/llama_index/core/__init__.py
@@ -1,6 +1,6 @@
"""Init file of LlamaIndex."""

__version__ = "0.10.68.post1"
__version__ = "0.11.0"

import logging
from logging import NullHandler
Expand Down Expand Up @@ -152,8 +152,5 @@
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder

# global service context for ServiceContext.from_defaults()
global_service_context: Optional[ServiceContext] = None

# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
@@ -15,7 +15,7 @@
    TaskStepOutput,
)
from llama_index.core.base.query_pipeline.query import QueryComponent
-from llama_index.core.bridge.pydantic import BaseModel, Field
+from llama_index.core.bridge.pydantic import BaseModel, Field, ConfigDict
from llama_index.core.callbacks import (
    CallbackManager,
    trace_method,
@@ -72,14 +72,12 @@ class QueryPipelineAgentWorker(BaseModel, BaseAgentWorker):
    """

+    model_config = ConfigDict(arbitrary_types_allowed=True)
    pipeline: QueryPipeline = Field(..., description="Query pipeline")
    callback_manager: CallbackManager = Field(..., exclude=True)
    task_key: str = Field("task", description="Key to store task in state")
    step_state_key: str = Field("step_state", description="Key to store step in state")

-    class Config:
-        arbitrary_types_allowed = True
-
    def __init__(
        self,
        pipeline: QueryPipeline,
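Files throughout this commit swap the inner `class Config` for `model_config = ConfigDict(...)`, the Pydantic v2 spelling of model configuration. A standalone sketch of the pattern; `TraceHandle` and `WorkerState` are invented stand-ins, not names from the codebase:

    from pydantic import BaseModel, ConfigDict, Field

    class TraceHandle:
        """Plain Python class, not a pydantic model, hence arbitrary_types_allowed."""

    class WorkerState(BaseModel):
        # v1 (removed throughout this commit):
        #     class Config:
        #         arbitrary_types_allowed = True
        model_config = ConfigDict(arbitrary_types_allowed=True)

        trace: TraceHandle = Field(default_factory=TraceHandle, exclude=True)
        task_key: str = Field("task", description="Key to store task in state")

    state = WorkerState(trace=TraceHandle())
    print(state.model_dump())  # "trace" is excluded from serialization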
27 changes: 12 additions & 15 deletions llama-index-core/llama_index/core/agent/custom/simple.py
@@ -19,7 +19,7 @@
    TaskStep,
    TaskStepOutput,
)
-from llama_index.core.bridge.pydantic import BaseModel, Field, PrivateAttr
+from llama_index.core.bridge.pydantic import BaseModel, Field, PrivateAttr, ConfigDict
from llama_index.core.callbacks import (
    CallbackManager,
    trace_method,
@@ -55,6 +55,7 @@ class CustomSimpleAgentWorker(BaseModel, BaseAgentWorker):
    """

+    model_config = ConfigDict(arbitrary_types_allowed=True)
    tools: Sequence[BaseTool] = Field(..., description="Tools to use for reasoning")
    llm: LLM = Field(..., description="LLM to use")
    callback_manager: CallbackManager = Field(
@@ -67,9 +68,6 @@ class CustomSimpleAgentWorker(BaseModel, BaseAgentWorker):

    _get_tools: Callable[[str], Sequence[BaseTool]] = PrivateAttr()

-    class Config:
-        arbitrary_types_allowed = True
-
    def __init__(
        self,
        tools: Sequence[BaseTool],
@@ -79,18 +77,7 @@ def __init__(
        tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
        **kwargs: Any,
    ) -> None:
-        if len(tools) > 0 and tool_retriever is not None:
-            raise ValueError("Cannot specify both tools and tool_retriever")
-        elif len(tools) > 0:
-            self._get_tools = lambda _: tools
-        elif tool_retriever is not None:
-            tool_retriever_c = cast(ObjectRetriever[BaseTool], tool_retriever)
-            self._get_tools = lambda message: tool_retriever_c.retrieve(message)
-        else:
-            self._get_tools = lambda _: []
-
        callback_manager = callback_manager or CallbackManager([])
-
        super().__init__(
            tools=tools,
            llm=llm,
@@ -100,6 +87,16 @@ def __init__(
            **kwargs,
        )

+        if len(tools) > 0 and tool_retriever is not None:
+            raise ValueError("Cannot specify both tools and tool_retriever")
+        elif len(tools) > 0:
+            self._get_tools = lambda _: tools
+        elif tool_retriever is not None:
+            tool_retriever_c = cast(ObjectRetriever[BaseTool], tool_retriever)
+            self._get_tools = lambda message: tool_retriever_c.retrieve(message)
+        else:
+            self._get_tools = lambda _: []
+
    @classmethod
    def from_tools(
        cls,
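The `__init__` reshuffle above, where `self._get_tools` is assigned only after `super().__init__()`, tracks a Pydantic v2 constraint: private attributes live in storage that `BaseModel.__init__` sets up, so assigning them before that call fails in v2 even though v1 tolerated it. A minimal sketch of the safe ordering, with invented names:

    from typing import Callable, Sequence
    from pydantic import BaseModel, PrivateAttr

    class ToolWorker(BaseModel):
        tools: Sequence[str] = ()
        _get_tools: Callable[[str], Sequence[str]] = PrivateAttr()

        def __init__(self, tools: Sequence[str] = (), **kwargs) -> None:
            # Initialize the model first; only then is private storage available.
            super().__init__(tools=tools, **kwargs)
            self._get_tools = lambda _: tools

    print(ToolWorker(tools=["search"])._get_tools("any query"))  # ['search']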
@@ -16,7 +16,7 @@
    TaskStep,
    TaskStepOutput,
)
-from llama_index.core.bridge.pydantic import BaseModel, Field
+from llama_index.core.bridge.pydantic import BaseModel, Field, ConfigDict
from llama_index.core.callbacks import (
    CallbackManager,
    trace_method,
@@ -44,6 +44,7 @@ class FnAgentWorker(BaseModel, BaseAgentWorker):
    """

+    model_config = ConfigDict(arbitrary_types_allowed=True)
    fn: Callable = Field(..., description="Function to run.")
    async_fn: Optional[Callable] = Field(
        None, description="Async function to run. If not provided, will run `fn`."
@@ -56,9 +57,6 @@ class FnAgentWorker(BaseModel, BaseAgentWorker):

    verbose: bool = Field(False, description="Verbose mode.")

-    class Config:
-        arbitrary_types_allowed = True
-
    def __init__(
        self,
        fn: Callable,
5 changes: 2 additions & 3 deletions llama-index-core/llama_index/core/agent/react/formatter.py
@@ -13,7 +13,7 @@
    ObservationReasoningStep,
)
from llama_index.core.base.llms.types import ChatMessage, MessageRole
-from llama_index.core.bridge.pydantic import BaseModel
+from llama_index.core.bridge.pydantic import BaseModel, ConfigDict
from llama_index.core.tools import BaseTool

logger = logging.getLogger(__name__)
@@ -36,8 +36,7 @@ def get_react_tool_descriptions(tools: Sequence[BaseTool]) -> List[str]:
class BaseAgentChatFormatter(BaseModel):
    """Base chat formatter."""

-    class Config:
-        arbitrary_types_allowed = True
+    model_config = ConfigDict(arbitrary_types_allowed=True)

    @abstractmethod
    def format(
27 changes: 14 additions & 13 deletions llama-index-core/llama_index/core/base/agent/types.py
@@ -7,7 +7,12 @@
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.base.response.schema import RESPONSE_TYPE, Response
-from llama_index.core.bridge.pydantic import BaseModel, Field
+from llama_index.core.bridge.pydantic import (
+    BaseModel,
+    Field,
+    SerializeAsAny,
+    ConfigDict,
+)
from llama_index.core.callbacks import CallbackManager, trace_method
from llama_index.core.chat_engine.types import (
    BaseChatEngine,
@@ -80,11 +85,11 @@ class TaskStep(BaseModel):
    """

-    task_id: str = Field(..., diescription="Task ID")
+    task_id: str = Field(..., description="Task ID")
    step_id: str = Field(..., description="Step ID")
    input: Optional[str] = Field(default=None, description="User input")
    # memory: BaseMemory = Field(
-    #     ..., type=BaseMemory, description="Conversational Memory"
+    #     ..., description="Conversational Memory"
    # )
    step_state: Dict[str, Any] = Field(
        default_factory=dict, description="Additional state for a given step."
@@ -155,25 +160,22 @@ class Task(BaseModel):
    """

-    class Config:
-        arbitrary_types_allowed = True
-
+    model_config = ConfigDict(arbitrary_types_allowed=True)
    task_id: str = Field(
-        default_factory=lambda: str(uuid.uuid4()), type=str, description="Task ID"
+        default_factory=lambda: str(uuid.uuid4()), description="Task ID"
    )
-    input: str = Field(..., type=str, description="User input")
+    input: str = Field(..., description="User input")

    # NOTE: this is state that may be modified throughout the course of execution of the task
-    memory: BaseMemory = Field(
+    memory: SerializeAsAny[BaseMemory] = Field(
        ...,
-        type=BaseMemory,
        description=(
            "Conversational Memory. Maintains state before execution of this task."
        ),
    )

    callback_manager: CallbackManager = Field(
-        default_factory=CallbackManager,
+        default_factory=lambda: CallbackManager([]),
        exclude=True,
        description="Callback manager for the task.",
    )
@@ -190,8 +192,7 @@ class Config:
class BaseAgentWorker(PromptMixin, DispatcherSpanMixin):
    """Base agent worker."""

-    class Config:
-        arbitrary_types_allowed = True
+    model_config = ConfigDict(arbitrary_types_allowed=True)

    def _get_prompts(self) -> PromptDictType:
        """Get prompts."""
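The new `memory: SerializeAsAny[BaseMemory]` annotation above reflects another Pydantic v2 behavior change: fields serialize according to their declared type by default, so subclass-only attributes are silently dropped unless the field is wrapped in `SerializeAsAny`. A minimal sketch with invented classes, unrelated to the real `BaseMemory`:

    from typing import List
    from pydantic import BaseModel, SerializeAsAny

    class Memory(BaseModel):
        token_limit: int = 3000

    class ChatMemory(Memory):
        history: List[str] = []

    class TaskState(BaseModel):
        memory: SerializeAsAny[Memory]

    state = TaskState(memory=ChatMemory(history=["hello"]))
    print(state.model_dump())  # keeps "history"; without SerializeAsAny it is dropped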
10 changes: 5 additions & 5 deletions llama-index-core/llama_index/core/base/base_query_engine.py
@@ -12,7 +12,7 @@
    validate_and_convert_stringable,
)
from llama_index.core.base.response.schema import RESPONSE_TYPE
-from llama_index.core.bridge.pydantic import Field
+from llama_index.core.bridge.pydantic import Field, ConfigDict, SerializeAsAny
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.prompts.mixin import PromptDictType, PromptMixin
from llama_index.core.schema import NodeWithScore, QueryBundle, QueryType
@@ -108,10 +108,10 @@ def _as_query_component(self, **kwargs: Any) -> QueryComponent:
class QueryEngineComponent(QueryComponent):
    """Query engine component."""

-    query_engine: BaseQueryEngine = Field(..., description="Query engine")
-
-    class Config:
-        arbitrary_types_allowed = True
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+    query_engine: SerializeAsAny[BaseQueryEngine] = Field(
+        ..., description="Query engine"
+    )

    def set_callback_manager(self, callback_manager: CallbackManager) -> None:
        """Set callback manager."""