From e1fa0a598c1b7eaf1ae2ffaee1787de3a5a87bad Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 3 Oct 2024 22:35:46 +0000
Subject: [PATCH] feat(api): rename to chat_completion_chunk (#81)

---
 api.md                                      |   2 +-
 src/writerai/resources/chat.py              |  17 +-
 src/writerai/types/__init__.py              |   1 +
 src/writerai/types/chat_completion_chunk.py | 257 ++++++++++++++++++++
 4 files changed, 268 insertions(+), 9 deletions(-)
 create mode 100644 src/writerai/types/chat_completion_chunk.py

diff --git a/api.md b/api.md
index e4a03a5..40e6fd4 100644
--- a/api.md
+++ b/api.md
@@ -15,7 +15,7 @@ Methods:
 Types:
 
 ```python
-from writerai.types import Chat
+from writerai.types import Chat, ChatCompletionChunk
 ```
 
 Methods:
diff --git a/src/writerai/resources/chat.py b/src/writerai/resources/chat.py
index aef685c..a276e98 100644
--- a/src/writerai/resources/chat.py
+++ b/src/writerai/resources/chat.py
@@ -25,6 +25,7 @@
 from .._streaming import Stream, AsyncStream
 from ..types.chat import Chat
 from .._base_client import make_request_options
+from ..types.chat_completion_chunk import ChatCompletionChunk
 
 __all__ = ["ChatResource", "AsyncChatResource"]
 
@@ -155,7 +156,7 @@ def chat(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> Stream[Chat]:
+    ) -> Stream[ChatCompletionChunk]:
         """Generate a chat completion based on the provided messages.
 
         The response shown
@@ -239,7 +240,7 @@ def chat(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> Chat | Stream[Chat]:
+    ) -> Chat | Stream[ChatCompletionChunk]:
         """Generate a chat completion based on the provided messages.
 
         The response shown
@@ -323,7 +324,7 @@ def chat(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> Chat | Stream[Chat]:
+    ) -> Chat | Stream[ChatCompletionChunk]:
         return self._post(
             "/v1/chat",
             body=maybe_transform(
@@ -348,7 +349,7 @@ def chat(
             ),
             cast_to=Chat,
             stream=stream or False,
-            stream_cls=Stream[Chat],
+            stream_cls=Stream[ChatCompletionChunk],
         )
 
 
@@ -478,7 +479,7 @@ async def chat(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> AsyncStream[Chat]:
+    ) -> AsyncStream[ChatCompletionChunk]:
         """Generate a chat completion based on the provided messages.
 
         The response shown
@@ -562,7 +563,7 @@ async def chat(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> Chat | AsyncStream[Chat]:
+    ) -> Chat | AsyncStream[ChatCompletionChunk]:
         """Generate a chat completion based on the provided messages.
 
         The response shown
@@ -646,7 +647,7 @@ async def chat(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> Chat | AsyncStream[Chat]:
+    ) -> Chat | AsyncStream[ChatCompletionChunk]:
         return await self._post(
             "/v1/chat",
             body=await async_maybe_transform(
@@ -671,7 +672,7 @@ async def chat(
             ),
             cast_to=Chat,
             stream=stream or False,
-            stream_cls=AsyncStream[Chat],
+            stream_cls=AsyncStream[ChatCompletionChunk],
         )
 
 
diff --git a/src/writerai/types/__init__.py b/src/writerai/types/__init__.py
index f3f465e..95ddde6 100644
--- a/src/writerai/types/__init__.py
+++ b/src/writerai/types/__init__.py
@@ -18,6 +18,7 @@
 from .graph_update_params import GraphUpdateParams as GraphUpdateParams
 from .model_list_response import ModelListResponse as ModelListResponse
 from .file_delete_response import FileDeleteResponse as FileDeleteResponse
+from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk
 from .graph_create_response import GraphCreateResponse as GraphCreateResponse
 from .graph_delete_response import GraphDeleteResponse as GraphDeleteResponse
 from .graph_question_params import GraphQuestionParams as GraphQuestionParams
diff --git a/src/writerai/types/chat_completion_chunk.py b/src/writerai/types/chat_completion_chunk.py
new file mode 100644
index 0000000..c48667c
--- /dev/null
+++ b/src/writerai/types/chat_completion_chunk.py
@@ -0,0 +1,257 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = [
+    "ChatCompletionChunk",
+    "Choice",
+    "ChoiceDelta",
+    "ChoiceDeltaToolCall",
+    "ChoiceDeltaToolCallFunction",
+    "ChoiceLogprobs",
+    "ChoiceLogprobsContent",
+    "ChoiceLogprobsContentTopLogprob",
+    "ChoiceLogprobsRefusal",
+    "ChoiceLogprobsRefusalTopLogprob",
+    "ChoiceMessage",
+    "ChoiceMessageToolCall",
+    "ChoiceMessageToolCallFunction",
+    "ChoiceSource",
+    "ChoiceSubquery",
+    "ChoiceSubquerySource",
+    "Usage",
+    "UsageCompletionTokensDetails",
+    "UsagePromptTokenDetails",
+]
+
+
+class ChoiceDeltaToolCallFunction(BaseModel):
+    arguments: Optional[str] = None
+
+    name: Optional[str] = None
+
+
+class ChoiceDeltaToolCall(BaseModel):
+    id: Optional[str] = None
+
+    function: Optional[ChoiceDeltaToolCallFunction] = None
+
+    index: Optional[int] = None
+
+    type: Optional[str] = None
+
+
+class ChoiceDelta(BaseModel):
+    role: Literal["user", "assistant", "system"]
+    """
+    Specifies the role associated with the content, indicating whether the message
+    is from the 'assistant' or another defined role, helping to contextualize the
+    output within the interaction flow.
+    """
+
+    content: Optional[str] = None
+    """The text content produced by the model.
+
+    This field contains the actual output generated, reflecting the model's
+    response to the input query or command.
+    """
+
+    tool_calls: Optional[List[ChoiceDeltaToolCall]] = None
+
+
+class ChoiceLogprobsContentTopLogprob(BaseModel):
+    token: str
+
+    logprob: float
+
+    bytes: Optional[List[int]] = None
+
+
+class ChoiceLogprobsContent(BaseModel):
+    token: str
+
+    logprob: float
+
+    top_logprobs: List[ChoiceLogprobsContentTopLogprob]
+
+    bytes: Optional[List[int]] = None
+
+
+class ChoiceLogprobsRefusalTopLogprob(BaseModel):
+    token: str
+
+    logprob: float
+
+    bytes: Optional[List[int]] = None
+
+
+class ChoiceLogprobsRefusal(BaseModel):
+    token: str
+
+    logprob: float
+
+    top_logprobs: List[ChoiceLogprobsRefusalTopLogprob]
+
+    bytes: Optional[List[int]] = None
+
+
+class ChoiceLogprobs(BaseModel):
+    content: Optional[List[ChoiceLogprobsContent]] = None
+
+    refusal: Optional[List[ChoiceLogprobsRefusal]] = None
+
+
+class ChoiceMessageToolCallFunction(BaseModel):
+    arguments: Optional[str] = None
+
+    name: Optional[str] = None
+
+
+class ChoiceMessageToolCall(BaseModel):
+    id: Optional[str] = None
+
+    function: Optional[ChoiceMessageToolCallFunction] = None
+
+    index: Optional[int] = None
+
+    type: Optional[str] = None
+
+
+class ChoiceMessage(BaseModel):
+    role: Literal["user", "assistant", "system"]
+    """
+    Specifies the role associated with the content, indicating whether the message
+    is from the 'assistant' or another defined role, helping to contextualize the
+    output within the interaction flow.
+    """
+
+    content: Optional[str] = None
+    """The text content produced by the model.
+
+    This field contains the actual output generated, reflecting the model's
+    response to the input query or command.
+    """
+
+    tool_calls: Optional[List[ChoiceMessageToolCall]] = None
+
+
+class ChoiceSource(BaseModel):
+    file_id: str
+    """The unique identifier of the file."""
+
+    snippet: str
+    """A snippet of text from the source file."""
+
+
+class ChoiceSubquerySource(BaseModel):
+    file_id: str
+    """The unique identifier of the file."""
+
+    snippet: str
+    """A snippet of text from the source file."""
+
+
+class ChoiceSubquery(BaseModel):
+    answer: str
+    """The answer to the subquery."""
+
+    query: str
+    """The subquery that was asked."""
+
+    sources: List[ChoiceSubquerySource]
+
+
+class Choice(BaseModel):
+    delta: ChoiceDelta
+    """A chat completion delta generated by streamed model responses."""
+
+    index: int
+    """The index of the choice in the list of completions generated by the model."""
+
+    finish_reason: Optional[Literal["stop", "length", "content_filter", "tool_calls"]] = None
+    """Describes the condition under which the model ceased generating content.
+
+    Common reasons include 'length' (reached the maximum output size), 'stop'
+    (encountered a stop sequence), 'content_filter' (harmful content filtered
+    out), or 'tool_calls' (encountered tool calls).
+    """
+
+    logprobs: Optional[ChoiceLogprobs] = None
+    """Log probability information for the choice."""
+
+    message: Optional[ChoiceMessage] = None
+    """The chat completion message from the model.
+
+    Note: this field is deprecated for streaming. Use `delta` instead.
+    """
+
+    sources: Optional[List[ChoiceSource]] = None
+    """An array of source objects that provide context for the model's response.
+
+    Only returned when using the Knowledge Graph chat tool.
+    """
+
+    subqueries: Optional[List[ChoiceSubquery]] = None
+    """An array of sub-query objects that provide context for the model's response.
+
+    Only returned when using the Knowledge Graph chat tool.
+    """
+
+
+class UsageCompletionTokensDetails(BaseModel):
+    reasoning_tokens: int
+
+
+class UsagePromptTokenDetails(BaseModel):
+    cached_tokens: int
+
+
+class Usage(BaseModel):
+    completion_tokens: int
+
+    prompt_tokens: int
+
+    total_tokens: int
+
+    completion_tokens_details: Optional[UsageCompletionTokensDetails] = None
+
+    prompt_token_details: Optional[UsagePromptTokenDetails] = None
+
+
+class ChatCompletionChunk(BaseModel):
+    id: str
+    """A globally unique identifier (UUID) for the response generated by the API.
+
+    This ID can be used to reference the specific operation or transaction within
+    the system for tracking or debugging purposes.
+    """
+
+    choices: List[Choice]
+    """
+    An array of objects representing the different outcomes or results produced by
+    the model based on the input provided.
+    """
+
+    created: int
+    """The Unix timestamp (in seconds) when the response was created.
+
+    This timestamp can be used to verify the timing of the response relative to
+    other events or operations.
+    """
+
+    model: str
+    """Identifies the specific model used to generate the response."""
+
+    service_tier: Optional[str] = None
+
+    system_fingerprint: Optional[str] = None
+
+    usage: Optional[Usage] = None
+    """Usage information for the chat completion response.
+
+    Please note that at this time Knowledge Graph tool usage is not included in
+    this object.
+    """