Commit
feat(api): rename to chat_completion_chunk (#81)
stainless-app[bot] authored and Stainless Bot committed Oct 3, 2024
1 parent f34c186 commit e1fa0a5
Showing 4 changed files with 268 additions and 9 deletions.
2 changes: 1 addition & 1 deletion api.md
@@ -15,7 +15,7 @@ Methods:
Types:

```python
-from writerai.types import Chat
+from writerai.types import Chat, ChatCompletionChunk
```

Methods:
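With this change, both response types are exported from `writerai.types`. A minimal sketch of telling the two apart, assuming nothing beyond the exports shown above (`describe` is a hypothetical helper, not part of the SDK):

```python
from writerai.types import Chat, ChatCompletionChunk

# Chat is the full, non-streaming response; ChatCompletionChunk is one
# incremental piece of a streamed response.
def describe(value: object) -> str:
    if isinstance(value, Chat):
        return "complete chat response"
    if isinstance(value, ChatCompletionChunk):
        return "streaming delta"
    return "unexpected payload"
```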
17 changes: 9 additions & 8 deletions src/writerai/resources/chat.py
@@ -25,6 +25,7 @@
from .._streaming import Stream, AsyncStream
from ..types.chat import Chat
from .._base_client import make_request_options
+from ..types.chat_completion_chunk import ChatCompletionChunk

__all__ = ["ChatResource", "AsyncChatResource"]

@@ -155,7 +156,7 @@ def chat(
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> Stream[Chat]:
+    ) -> Stream[ChatCompletionChunk]:
        """Generate a chat completion based on the provided messages.

        The response shown
@@ -239,7 +240,7 @@ def chat(
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> Chat | Stream[Chat]:
+    ) -> Chat | Stream[ChatCompletionChunk]:
        """Generate a chat completion based on the provided messages.

        The response shown
@@ -323,7 +324,7 @@ def chat(
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> Chat | Stream[Chat]:
+    ) -> Chat | Stream[ChatCompletionChunk]:
        return self._post(
            "/v1/chat",
            body=maybe_transform(
@@ -348,7 +349,7 @@ def chat(
            ),
            cast_to=Chat,
            stream=stream or False,
-            stream_cls=Stream[Chat],
+            stream_cls=Stream[ChatCompletionChunk],
        )


@@ -478,7 +479,7 @@ async def chat(
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> AsyncStream[Chat]:
+    ) -> AsyncStream[ChatCompletionChunk]:
        """Generate a chat completion based on the provided messages.

        The response shown
@@ -562,7 +563,7 @@ async def chat(
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> Chat | AsyncStream[Chat]:
+    ) -> Chat | AsyncStream[ChatCompletionChunk]:
        """Generate a chat completion based on the provided messages.

        The response shown
@@ -646,7 +647,7 @@ async def chat(
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> Chat | AsyncStream[Chat]:
+    ) -> Chat | AsyncStream[ChatCompletionChunk]:
        return await self._post(
            "/v1/chat",
            body=await async_maybe_transform(
@@ -671,7 +672,7 @@ async def chat(
            ),
            cast_to=Chat,
            stream=stream or False,
-            stream_cls=AsyncStream[Chat],
+            stream_cls=AsyncStream[ChatCompletionChunk],
        )


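With these signatures, calling `chat` with `stream=True` now yields `ChatCompletionChunk` values instead of `Chat`. A usage sketch under stated assumptions: the `Writer` client class, the environment-variable API key, and the `palmyra-x-004` model name follow this SDK's conventions but are not shown in this diff:

```python
from writerai import Writer  # assumed client entry point

client = Writer()  # assumed to read WRITER_API_KEY from the environment

# stream=True selects the Stream[ChatCompletionChunk] overload above.
stream = client.chat.chat(
    model="palmyra-x-004",  # assumed model name, for illustration only
    messages=[{"role": "user", "content": "Summarize this commit."}],
    stream=True,
)

for chunk in stream:  # each item is a ChatCompletionChunk
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
```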
1 change: 1 addition & 0 deletions src/writerai/types/__init__.py
@@ -18,6 +18,7 @@
from .graph_update_params import GraphUpdateParams as GraphUpdateParams
from .model_list_response import ModelListResponse as ModelListResponse
from .file_delete_response import FileDeleteResponse as FileDeleteResponse
+from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk
from .graph_create_response import GraphCreateResponse as GraphCreateResponse
from .graph_delete_response import GraphDeleteResponse as GraphDeleteResponse
from .graph_question_params import GraphQuestionParams as GraphQuestionParams
257 changes: 257 additions & 0 deletions src/writerai/types/chat_completion_chunk.py
@@ -0,0 +1,257 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Optional
from typing_extensions import Literal

from .._models import BaseModel

__all__ = [
    "ChatCompletionChunk",
    "Choice",
    "ChoiceDelta",
    "ChoiceDeltaToolCall",
    "ChoiceDeltaToolCallFunction",
    "ChoiceLogprobs",
    "ChoiceLogprobsContent",
    "ChoiceLogprobsContentTopLogprob",
    "ChoiceLogprobsRefusal",
    "ChoiceLogprobsRefusalTopLogprob",
    "ChoiceMessage",
    "ChoiceMessageToolCall",
    "ChoiceMessageToolCallFunction",
    "ChoiceSource",
    "ChoiceSubquery",
    "ChoiceSubquerySource",
    "Usage",
    "UsageCompletionTokensDetails",
    "UsagePromptTokenDetails",
]


class ChoiceDeltaToolCallFunction(BaseModel):
    arguments: Optional[str] = None

    name: Optional[str] = None


class ChoiceDeltaToolCall(BaseModel):
    id: Optional[str] = None

    function: Optional[ChoiceDeltaToolCallFunction] = None

    index: Optional[int] = None

    type: Optional[str] = None


class ChoiceDelta(BaseModel):
    role: Literal["user", "assistant", "system"]
    """
    Specifies the role associated with the content, indicating whether the message
    is from the 'assistant' or another defined role, helping to contextualize the
    output within the interaction flow.
    """

    content: Optional[str] = None
    """The text content produced by the model.

    This field contains the actual output generated, reflecting the model's response
    to the input query or command.
    """

    tool_calls: Optional[List[ChoiceDeltaToolCall]] = None


class ChoiceLogprobsContentTopLogprob(BaseModel):
    token: str

    logprob: float

    bytes: Optional[List[int]] = None


class ChoiceLogprobsContent(BaseModel):
    token: str

    logprob: float

    top_logprobs: List[ChoiceLogprobsContentTopLogprob]

    bytes: Optional[List[int]] = None


class ChoiceLogprobsRefusalTopLogprob(BaseModel):
    token: str

    logprob: float

    bytes: Optional[List[int]] = None


class ChoiceLogprobsRefusal(BaseModel):
    token: str

    logprob: float

    top_logprobs: List[ChoiceLogprobsRefusalTopLogprob]

    bytes: Optional[List[int]] = None


class ChoiceLogprobs(BaseModel):
    content: Optional[List[ChoiceLogprobsContent]] = None

    refusal: Optional[List[ChoiceLogprobsRefusal]] = None


class ChoiceMessageToolCallFunction(BaseModel):
    arguments: Optional[str] = None

    name: Optional[str] = None


class ChoiceMessageToolCall(BaseModel):
    id: Optional[str] = None

    function: Optional[ChoiceMessageToolCallFunction] = None

    index: Optional[int] = None

    type: Optional[str] = None


class ChoiceMessage(BaseModel):
    role: Literal["user", "assistant", "system"]
    """
    Specifies the role associated with the content, indicating whether the message
    is from the 'assistant' or another defined role, helping to contextualize the
    output within the interaction flow.
    """

    content: Optional[str] = None
    """The text content produced by the model.

    This field contains the actual output generated, reflecting the model's response
    to the input query or command.
    """

    tool_calls: Optional[List[ChoiceMessageToolCall]] = None


class ChoiceSource(BaseModel):
    file_id: str
    """The unique identifier of the file."""

    snippet: str
    """A snippet of text from the source file."""


class ChoiceSubquerySource(BaseModel):
    file_id: str
    """The unique identifier of the file."""

    snippet: str
    """A snippet of text from the source file."""


class ChoiceSubquery(BaseModel):
    answer: str
    """The answer to the subquery."""

    query: str
    """The subquery that was asked."""

    sources: List[ChoiceSubquerySource]


class Choice(BaseModel):
    delta: ChoiceDelta
    """A chat completion delta generated by streamed model responses."""

    index: int
    """The index of the choice in the list of completions generated by the model."""

    finish_reason: Optional[Literal["stop", "length", "content_filter", "tool_calls"]] = None
    """Describes the condition under which the model ceased generating content.

    Common reasons include 'length' (reached the maximum output size), 'stop'
    (encountered a stop sequence), 'content_filter' (harmful content filtered out),
    or 'tool_calls' (encountered tool calls).
    """

    logprobs: Optional[ChoiceLogprobs] = None
    """Log probability information for the choice."""

    message: Optional[ChoiceMessage] = None
    """The chat completion message from the model.

    Note: this field is deprecated for streaming. Use `delta` instead.
    """

    sources: Optional[List[ChoiceSource]] = None
    """An array of source objects that provide context for the model's response.

    Only returned when using the Knowledge Graph chat tool.
    """

    subqueries: Optional[List[ChoiceSubquery]] = None
    """An array of sub-query objects that provide context for the model's response.

    Only returned when using the Knowledge Graph chat tool.
    """


class UsageCompletionTokensDetails(BaseModel):
    reasoning_tokens: int


class UsagePromptTokenDetails(BaseModel):
    cached_tokens: int


class Usage(BaseModel):
    completion_tokens: int

    prompt_tokens: int

    total_tokens: int

    completion_tokens_details: Optional[UsageCompletionTokensDetails] = None

    prompt_token_details: Optional[UsagePromptTokenDetails] = None


class ChatCompletionChunk(BaseModel):
    id: str
    """A globally unique identifier (UUID) for the response generated by the API.

    This ID can be used to reference the specific operation or transaction within
    the system for tracking or debugging purposes.
    """

    choices: List[Choice]
    """
    An array of objects representing the different outcomes or results produced by
    the model based on the input provided.
    """

    created: int
    """The Unix timestamp (in seconds) when the response was created.

    This timestamp can be used to verify the timing of the response relative to
    other events or operations.
    """

    model: str
    """Identifies the specific model used to generate the response."""

    service_tier: Optional[str] = None

    system_fingerprint: Optional[str] = None

    usage: Optional[Usage] = None
    """Usage information for the chat completion response.

    Please note that at this time Knowledge Graph tool usage is not included in this
    object.
    """
