-
Notifications
You must be signed in to change notification settings - Fork 1
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
feat(api): rename to chat_completion_chunk (#81)
- Loading branch information
1 parent
f34c186
commit e1fa0a5
Showing
4 changed files
with
268 additions
and
9 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,257 @@ | ||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. | ||
|
||
from typing import List, Optional | ||
from typing_extensions import Literal | ||
|
||
from .._models import BaseModel | ||
|
||
# Public API of this generated module: the chunk model plus every nested
# sub-model it is composed of.
__all__ = [
    "ChatCompletionChunk",
    "Choice",
    "ChoiceDelta",
    "ChoiceDeltaToolCall",
    "ChoiceDeltaToolCallFunction",
    "ChoiceLogprobs",
    "ChoiceLogprobsContent",
    "ChoiceLogprobsContentTopLogprob",
    "ChoiceLogprobsRefusal",
    "ChoiceLogprobsRefusalTopLogprob",
    "ChoiceMessage",
    "ChoiceMessageToolCall",
    "ChoiceMessageToolCallFunction",
    "ChoiceSource",
    "ChoiceSubquery",
    "ChoiceSubquerySource",
    "Usage",
    "UsageCompletionTokensDetails",
    "UsagePromptTokenDetails",
]
|
||
|
||
class ChoiceDeltaToolCallFunction(BaseModel):
    """Function payload of a tool call inside a streamed delta.

    Both fields are optional; in a stream they are presumably filled in
    incrementally across chunks — confirm against the API spec.
    """

    # JSON-encoded (possibly partial) arguments for the function call.
    arguments: Optional[str] = None

    # Name of the function being called, when present in this fragment.
    name: Optional[str] = None
|
||
|
||
class ChoiceDeltaToolCall(BaseModel):
    """A tool call (or fragment of one) carried by a streamed delta."""

    # Identifier of the tool call, when present in this fragment.
    id: Optional[str] = None

    # Function name/arguments for this tool call.
    function: Optional[ChoiceDeltaToolCallFunction] = None

    # Position of this tool call within the choice's tool-call list.
    index: Optional[int] = None

    # Tool type; presumably "function" — not constrained by this model.
    type: Optional[str] = None
|
||
|
||
class ChoiceDelta(BaseModel):
    """Incremental message content for one streamed choice.

    Only `role` is required; `content` and `tool_calls` are optional and are
    presumably populated incrementally across chunks — confirm against spec.
    """

    role: Literal["user", "assistant", "system"]
    """
    Specifies the role associated with the content, indicating whether the message
    is from the 'assistant' or another defined role, helping to contextualize the
    output within the interaction flow.
    """

    content: Optional[str] = None
    """The text content produced by the model.
    This field contains the actual output generated, reflecting the model's response
    to the input query or command.
    """

    # Tool calls (or fragments thereof) emitted with this delta.
    tool_calls: Optional[List[ChoiceDeltaToolCall]] = None
|
||
|
||
class ChoiceLogprobsContentTopLogprob(BaseModel):
    """One alternative token candidate with its log probability."""

    # The candidate token text.
    token: str

    # Log probability assigned to this candidate.
    logprob: float

    # Presumably the UTF-8 byte values of the token (OpenAI logprobs shape);
    # None when not applicable — confirm against the API spec.
    bytes: Optional[List[int]] = None
|
||
|
||
class ChoiceLogprobsContent(BaseModel):
    """Log-probability record for one generated content token."""

    # The generated token text.
    token: str

    # Log probability of the generated token.
    logprob: float

    # Most likely alternative tokens at this position.
    top_logprobs: List[ChoiceLogprobsContentTopLogprob]

    # Presumably the UTF-8 byte values of the token; None when not applicable
    # — confirm against the API spec.
    bytes: Optional[List[int]] = None
|
||
|
||
class ChoiceLogprobsRefusalTopLogprob(BaseModel):
    """One alternative refusal-token candidate with its log probability."""

    # The candidate token text.
    token: str

    # Log probability assigned to this candidate.
    logprob: float

    # Presumably the UTF-8 byte values of the token; None when not applicable
    # — confirm against the API spec.
    bytes: Optional[List[int]] = None
|
||
|
||
class ChoiceLogprobsRefusal(BaseModel):
    """Log-probability record for one token of a refusal message."""

    # The refusal token text.
    token: str

    # Log probability of the refusal token.
    logprob: float

    # Most likely alternative tokens at this position.
    top_logprobs: List[ChoiceLogprobsRefusalTopLogprob]

    # Presumably the UTF-8 byte values of the token; None when not applicable
    # — confirm against the API spec.
    bytes: Optional[List[int]] = None
|
||
|
||
class ChoiceLogprobs(BaseModel):
    """Per-token log-probability information for a choice."""

    # Logprobs for the generated content tokens, when requested.
    content: Optional[List[ChoiceLogprobsContent]] = None

    # Logprobs for refusal-message tokens, when a refusal was produced.
    refusal: Optional[List[ChoiceLogprobsRefusal]] = None
|
||
|
||
class ChoiceMessageToolCallFunction(BaseModel):
    """Function payload of a tool call on a (non-delta) choice message.

    Structurally identical to ChoiceDeltaToolCallFunction; kept separate by
    the code generator.
    """

    # JSON-encoded arguments for the function call.
    arguments: Optional[str] = None

    # Name of the function being called.
    name: Optional[str] = None
|
||
|
||
class ChoiceMessageToolCall(BaseModel):
    """A tool call attached to a (non-delta) choice message.

    Structurally identical to ChoiceDeltaToolCall; kept separate by the
    code generator.
    """

    # Identifier of the tool call.
    id: Optional[str] = None

    # Function name/arguments for this tool call.
    function: Optional[ChoiceMessageToolCallFunction] = None

    # Position of this tool call within the message's tool-call list.
    index: Optional[int] = None

    # Tool type; presumably "function" — not constrained by this model.
    type: Optional[str] = None
|
||
|
||
class ChoiceMessage(BaseModel):
    """Complete (non-delta) message form of a streamed choice.

    Per the `Choice.message` docstring in this module, this shape is
    deprecated for streaming in favor of `ChoiceDelta`.
    """

    role: Literal["user", "assistant", "system"]
    """
    Specifies the role associated with the content, indicating whether the message
    is from the 'assistant' or another defined role, helping to contextualize the
    output within the interaction flow.
    """

    content: Optional[str] = None
    """The text content produced by the model.
    This field contains the actual output generated, reflecting the model's response
    to the input query or command.
    """

    # Tool calls emitted with this message.
    tool_calls: Optional[List[ChoiceMessageToolCall]] = None
|
||
|
||
class ChoiceSource(BaseModel):
    """A source document excerpt backing a choice's response."""

    file_id: str
    """The unique identifier of the file."""

    snippet: str
    """A snippet of text from the source file."""
|
||
|
||
class ChoiceSubquerySource(BaseModel):
    """A source document excerpt backing a subquery answer.

    Structurally identical to ChoiceSource; kept separate by the code
    generator.
    """

    file_id: str
    """The unique identifier of the file."""

    snippet: str
    """A snippet of text from the source file."""
|
||
|
||
class ChoiceSubquery(BaseModel):
    """A decomposed sub-question answered while producing the response."""

    answer: str
    """The answer to the subquery."""

    query: str
    """The subquery that was asked."""

    # Source excerpts that support this subquery's answer.
    sources: List[ChoiceSubquerySource]
|
||
|
||
class Choice(BaseModel):
    """One completion alternative within a streamed chunk."""

    delta: ChoiceDelta
    """A chat completion delta generated by streamed model responses."""

    index: int
    """The index of the choice in the list of completions generated by the model."""

    finish_reason: Optional[Literal["stop", "length", "content_filter", "tool_calls"]] = None
    """Describes the condition under which the model ceased generating content.
    Common reasons include 'length' (reached the maximum output size), 'stop'
    (encountered a stop sequence), 'content_filter' (harmful content filtered out),
    or 'tool_calls' (encountered tool calls).
    """

    logprobs: Optional[ChoiceLogprobs] = None
    """Log probability information for the choice."""

    message: Optional[ChoiceMessage] = None
    """The chat completion message from the model.
    Note: this field is deprecated for streaming. Use `delta` instead.
    """

    sources: Optional[List[ChoiceSource]] = None
    """An array of source objects that provide context for the model's response.
    Only returned when using the Knowledge Graph chat tool.
    """

    subqueries: Optional[List[ChoiceSubquery]] = None
    """An array of sub-query objects that provide context for the model's response.
    Only returned when using the Knowledge Graph chat tool.
    """
|
||
|
||
class UsageCompletionTokensDetails(BaseModel):
    """Breakdown of the completion-token count."""

    # Completion tokens attributed to reasoning.
    reasoning_tokens: int
|
||
|
||
class UsagePromptTokenDetails(BaseModel):
    """Breakdown of the prompt-token count.

    NOTE(review): named "TokenDetails" (singular) while the completion-side
    model is "TokensDetails" — inconsistent, but part of the generated public
    interface, so it is not renamed here.
    """

    # Prompt tokens served from cache.
    cached_tokens: int
|
||
|
||
class Usage(BaseModel):
    """Token accounting for a chat completion response."""

    # Tokens generated in the completion.
    completion_tokens: int

    # Tokens consumed by the prompt.
    prompt_tokens: int

    # Presumably prompt_tokens + completion_tokens — confirm against spec.
    total_tokens: int

    # Optional breakdown of completion tokens (e.g. reasoning tokens).
    completion_tokens_details: Optional[UsageCompletionTokensDetails] = None

    # Optional breakdown of prompt tokens (e.g. cached tokens).
    prompt_token_details: Optional[UsagePromptTokenDetails] = None
|
||
|
||
class ChatCompletionChunk(BaseModel):
    """One server-streamed chunk of a chat completion response."""

    id: str
    """A globally unique identifier (UUID) for the response generated by the API.
    This ID can be used to reference the specific operation or transaction within
    the system for tracking or debugging purposes.
    """

    choices: List[Choice]
    """
    An array of objects representing the different outcomes or results produced by
    the model based on the input provided.
    """

    created: int
    """The Unix timestamp (in seconds) when the response was created.
    This timestamp can be used to verify the timing of the response relative to
    other events or operations.
    """

    model: str
    """Identifies the specific model used to generate the response."""

    # NOTE(review): semantics not documented here — presumably mirrors the
    # OpenAI `service_tier` field; confirm against the API spec.
    service_tier: Optional[str] = None

    # NOTE(review): presumably a backend-configuration fingerprint, as in the
    # OpenAI API; confirm against the API spec.
    system_fingerprint: Optional[str] = None

    usage: Optional[Usage] = None
    """Usage information for the chat completion response.
    Please note that at this time Knowledge Graph tool usage is not included in this
    object.
    """