docs(api): updates to API spec (#64)
stainless-app[bot] authored and stainless-bot committed Sep 20, 2024
1 parent 54ca957 commit a86b730
Showing 12 changed files with 333 additions and 76 deletions.
2 changes: 1 addition & 1 deletion .stats.yml
@@ -1,2 +1,2 @@
configured_endpoints: 16
- openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/writerai%2Fwriter-29d74057f6277b8657539fbdda86e469e2d50765733bfc3db2578eb6fdb8d07e.yml
+ openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/writerai%2Fwriter-149f74d46fcb6e37bb59c571100cf81230cf567855570f301cbb2d18d3a84764.yml
14 changes: 7 additions & 7 deletions README.md
@@ -36,7 +36,7 @@ client = Writer(
chat = client.chat.chat(
messages=[
{
- "content": "content",
+ "content": "Write a memo summarizing this earnings report.",
"role": "user",
}
],
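For context, the full synchronous call this hunk edits might look like the sketch below. The `palmyra-x-004` model name follows the docstring change made elsewhere in this commit, and reading the reply via `choices[0].message.content` matches the `Chat` response model; both are assumptions about the surrounding README rather than verbatim text from it.

```python
# Sketch of the complete call around this hunk (not verbatim README text).
import os

from writerai import Writer

# The api_key argument can be omitted; the client falls back to WRITER_API_KEY.
client = Writer(api_key=os.environ.get("WRITER_API_KEY"))

chat = client.chat.chat(
    messages=[
        {
            "content": "Write a memo summarizing this earnings report.",
            "role": "user",
        }
    ],
    model="palmyra-x-004",  # assumed; this hunk does not show the model argument
)
print(chat.choices[0].message.content)
```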
@@ -69,7 +69,7 @@ async def main() -> None:
chat = await client.chat.chat(
messages=[
{
- "content": "content",
+ "content": "Write a memo summarizing this earnings report.",
"role": "user",
}
],
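The async hunk above presumably sits inside a program like the following sketch, which assumes the package exports `AsyncWriter` alongside `Writer`, as Stainless-generated SDKs conventionally do.

```python
# Sketch of the async variant (AsyncWriter is assumed from Stainless conventions).
import asyncio

from writerai import AsyncWriter

client = AsyncWriter()  # reads WRITER_API_KEY from the environment


async def main() -> None:
    chat = await client.chat.chat(
        messages=[
            {
                "content": "Write a memo summarizing this earnings report.",
                "role": "user",
            }
        ],
        model="palmyra-x-004",  # assumed, as in the sync sketch above
    )
    print(chat.choices[0].message.content)


asyncio.run(main())
```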
@@ -146,7 +146,7 @@ try:
client.chat.chat(
messages=[
{
- "content": "content",
+ "content": "Write a memo summarizing this earnings report.",
"role": "user",
}
],
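This hunk edits the README's error-handling example. Below is a minimal sketch of the full pattern, assuming the usual Stainless exception hierarchy (`APIConnectionError`, `RateLimitError`, `APIStatusError`), which this diff does not itself confirm.

```python
# Sketch of the error-handling pattern around this hunk; exception names are
# assumed from Stainless SDK conventions.
import writerai
from writerai import Writer

client = Writer()

try:
    client.chat.chat(
        messages=[
            {
                "content": "Write a memo summarizing this earnings report.",
                "role": "user",
            }
        ],
        model="palmyra-x-004",
    )
except writerai.APIConnectionError as exc:
    print("The server could not be reached:", exc.__cause__)
except writerai.RateLimitError:
    print("Received a 429; back off before retrying.")
except writerai.APIStatusError as exc:
    print("Non-2xx status:", exc.status_code)
```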
@@ -197,7 +197,7 @@ client = Writer(
client.with_options(max_retries=5).chat.chat(
messages=[
{
- "content": "content",
+ "content": "Write a memo summarizing this earnings report.",
"role": "user",
}
],
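The hunk above shows the per-request override. Stainless clients conventionally also accept a client-wide default, so the following sketch assumes a `max_retries` constructor argument.

```python
from writerai import Writer

# Client-wide default: disable retries for every request (assumed constructor arg).
client = Writer(max_retries=0)

# Per-request override, as in the README hunk above.
chat = client.with_options(max_retries=5).chat.chat(
    messages=[{"content": "Write a memo summarizing this earnings report.", "role": "user"}],
    model="palmyra-x-004",
)
```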
@@ -228,7 +228,7 @@ client = Writer(
client.with_options(timeout=5.0).chat.chat(
messages=[
{
- "content": "content",
+ "content": "Write a memo summarizing this earnings report.",
"role": "user",
}
],
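The same pattern applies to timeouts. The sketch below assumes the client accepts either a float or an `httpx.Timeout`, the convention for Stainless SDKs built on httpx.

```python
import httpx

from writerai import Writer

# Client-wide default: 20 seconds for the entire request (assumed constructor arg).
client = Writer(timeout=20.0)

# Finer-grained control: 60s overall, but at most 5s to read each response chunk.
client = Writer(timeout=httpx.Timeout(60.0, read=5.0, write=10.0, connect=2.0))
```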
@@ -274,7 +274,7 @@ from writerai import Writer
client = Writer()
response = client.chat.with_raw_response.chat(
messages=[{
- "content": "content",
+ "content": "Write a memo summarizing this earnings report.",
"role": "user",
}],
model="palmyra-x-002-32k",
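The raw-response hunk is truncated here; below is a sketch of what typically follows, assuming the standard Stainless raw-response wrapper with `.headers` and `.parse()` (the header name is illustrative).

```python
# Continuing the raw-response example above (a sketch, not verbatim README text).
print(response.headers.get("X-Request-Id"))  # illustrative header name

chat = response.parse()  # parse() is assumed to return the same Chat object
print(chat.choices[0].message.content)
```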
@@ -299,7 +299,7 @@ To stream the response body, use `.with_streaming_response` instead, which requi
with client.chat.with_streaming_response.chat(
messages=[
{
- "content": "content",
+ "content": "Write a memo summarizing this earnings report.",
"role": "user",
}
],
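The streaming hunk above is also truncated. A sketch of how such a block usually completes, assuming the streamed response exposes `iter_text()` as Stainless streaming responses generally do:

```python
# Sketch: consuming the streaming response from the truncated hunk above.
from writerai import Writer

client = Writer()

with client.chat.with_streaming_response.chat(
    messages=[
        {
            "content": "Write a memo summarizing this earnings report.",
            "role": "user",
        }
    ],
    model="palmyra-x-004",
) as response:
    for chunk in response.iter_text():  # iter_text() assumed from Stainless conventions
        print(chunk, end="")
```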
74 changes: 68 additions & 6 deletions src/writerai/resources/chat.py
@@ -50,6 +50,8 @@ def chat(
stop: Union[List[str], str] | NotGiven = NOT_GIVEN,
stream: Literal[False] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: chat_chat_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[chat_chat_params.Tool] | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -66,7 +68,7 @@ def chat(
the model to respond to. The array must contain at least one message.
model: Specifies the model to be used for generating responses. The chat model is
- always `palmyra-x-002-32k` for conversational use.
+ always `palmyra-x-004` for conversational use.
max_tokens: Defines the maximum number of tokens (words and characters) that the model can
generate in the response. The default value is set to 16, but it can be adjusted
@@ -88,6 +90,13 @@ def chat(
temperature results in more varied and less predictable text, while a lower
temperature produces more deterministic and conservative outputs.
+ tool_choice: Configure how the model will call functions: `auto` will allow the model to
+ automatically choose the best tool, `none` disables tool calling. You can also
+ pass a specific previously defined function as a string.
+
+ tools: An array of tools described to the model using JSON schema that the model can
+ use to generate responses.
top_p: Sets the threshold for "nucleus sampling," a technique to focus the model's
token generation on the most likely subset of tokens. Only tokens with
cumulative probability above this threshold are considered, controlling the
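The new `tools` and `tool_choice` parameters documented in this hunk accept JSON-schema tool descriptions. A sketch of a call using them follows; the `{"type": "function", "function": {...}}` shape and the illustrative `get_stock_price` tool are assumptions, since the diff shows the parameter types but not their exact schema.

```python
# Sketch: passing a JSON-schema tool to chat(); the tool shape is assumed.
from writerai import Writer

client = Writer()

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_stock_price",  # illustrative function name
            "description": "Return the latest price for a ticker symbol.",
            "parameters": {
                "type": "object",
                "properties": {"ticker": {"type": "string"}},
                "required": ["ticker"],
            },
        },
    }
]

chat = client.chat.chat(
    messages=[{"content": "What is ACME trading at?", "role": "user"}],
    model="palmyra-x-004",
    tools=tools,
    tool_choice="auto",  # or "none", or a specific function name, per the docstring
)
```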
@@ -114,6 +123,8 @@ def chat(
n: int | NotGiven = NOT_GIVEN,
stop: Union[List[str], str] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: chat_chat_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[chat_chat_params.Tool] | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -130,7 +141,7 @@ def chat(
the model to respond to. The array must contain at least one message.
model: Specifies the model to be used for generating responses. The chat model is
- always `palmyra-x-002-32k` for conversational use.
+ always `palmyra-x-004` for conversational use.
stream: Indicates whether the response should be streamed incrementally as it is
generated or only returned once fully complete. Streaming can be useful for
@@ -152,6 +163,13 @@ def chat(
temperature results in more varied and less predictable text, while a lower
temperature produces more deterministic and conservative outputs.
+ tool_choice: Configure how the model will call functions: `auto` will allow the model to
+ automatically choose the best tool, `none` disables tool calling. You can also
+ pass a specific previously defined function as a string.
+
+ tools: An array of tools described to the model using JSON schema that the model can
+ use to generate responses.
top_p: Sets the threshold for "nucleus sampling," a technique to focus the model's
token generation on the most likely subset of tokens. Only tokens with
cumulative probability above this threshold are considered, controlling the
@@ -178,6 +196,8 @@ def chat(
n: int | NotGiven = NOT_GIVEN,
stop: Union[List[str], str] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: chat_chat_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[chat_chat_params.Tool] | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -194,7 +214,7 @@ def chat(
the model to respond to. The array must contain at least one message.
model: Specifies the model to be used for generating responses. The chat model is
- always `palmyra-x-002-32k` for conversational use.
+ always `palmyra-x-004` for conversational use.
stream: Indicates whether the response should be streamed incrementally as it is
generated or only returned once fully complete. Streaming can be useful for
@@ -216,6 +236,13 @@ def chat(
temperature results in more varied and less predictable text, while a lower
temperature produces more deterministic and conservative outputs.
+ tool_choice: Configure how the model will call functions: `auto` will allow the model to
+ automatically choose the best tool, `none` disables tool calling. You can also
+ pass a specific previously defined function as a string.
+
+ tools: An array of tools described to the model using JSON schema that the model can
+ use to generate responses.
top_p: Sets the threshold for "nucleus sampling," a technique to focus the model's
token generation on the most likely subset of tokens. Only tokens with
cumulative probability above this threshold are considered, controlling the
@@ -242,6 +269,8 @@ def chat(
stop: Union[List[str], str] | NotGiven = NOT_GIVEN,
stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: chat_chat_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[chat_chat_params.Tool] | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -261,6 +290,8 @@ def chat(
"stop": stop,
"stream": stream,
"temperature": temperature,
+ "tool_choice": tool_choice,
+ "tools": tools,
"top_p": top_p,
},
chat_chat_params.ChatChatParams,
@@ -294,6 +325,8 @@ async def chat(
stop: Union[List[str], str] | NotGiven = NOT_GIVEN,
stream: Literal[False] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: chat_chat_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[chat_chat_params.Tool] | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -310,7 +343,7 @@ async def chat(
the model to respond to. The array must contain at least one message.
model: Specifies the model to be used for generating responses. The chat model is
- always `palmyra-x-002-32k` for conversational use.
+ always `palmyra-x-004` for conversational use.
max_tokens: Defines the maximum number of tokens (words and characters) that the model can
generate in the response. The default value is set to 16, but it can be adjusted
@@ -332,6 +365,13 @@ async def chat(
temperature results in more varied and less predictable text, while a lower
temperature produces more deterministic and conservative outputs.
+ tool_choice: Configure how the model will call functions: `auto` will allow the model to
+ automatically choose the best tool, `none` disables tool calling. You can also
+ pass a specific previously defined function as a string.
+
+ tools: An array of tools described to the model using JSON schema that the model can
+ use to generate responses.
top_p: Sets the threshold for "nucleus sampling," a technique to focus the model's
token generation on the most likely subset of tokens. Only tokens with
cumulative probability above this threshold are considered, controlling the
@@ -358,6 +398,8 @@ async def chat(
n: int | NotGiven = NOT_GIVEN,
stop: Union[List[str], str] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: chat_chat_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[chat_chat_params.Tool] | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -374,7 +416,7 @@ async def chat(
the model to respond to. The array must contain at least one message.
model: Specifies the model to be used for generating responses. The chat model is
- always `palmyra-x-002-32k` for conversational use.
+ always `palmyra-x-004` for conversational use.
stream: Indicates whether the response should be streamed incrementally as it is
generated or only returned once fully complete. Streaming can be useful for
@@ -396,6 +438,13 @@ async def chat(
temperature results in more varied and less predictable text, while a lower
temperature produces more deterministic and conservative outputs.
+ tool_choice: Configure how the model will call functions: `auto` will allow the model to
+ automatically choose the best tool, `none` disables tool calling. You can also
+ pass a specific previously defined function as a string.
+
+ tools: An array of tools described to the model using JSON schema that the model can
+ use to generate responses.
top_p: Sets the threshold for "nucleus sampling," a technique to focus the model's
token generation on the most likely subset of tokens. Only tokens with
cumulative probability above this threshold are considered, controlling the
@@ -422,6 +471,8 @@ async def chat(
n: int | NotGiven = NOT_GIVEN,
stop: Union[List[str], str] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: chat_chat_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[chat_chat_params.Tool] | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -438,7 +489,7 @@ async def chat(
the model to respond to. The array must contain at least one message.
model: Specifies the model to be used for generating responses. The chat model is
- always `palmyra-x-002-32k` for conversational use.
+ always `palmyra-x-004` for conversational use.
stream: Indicates whether the response should be streamed incrementally as it is
generated or only returned once fully complete. Streaming can be useful for
@@ -460,6 +511,13 @@ async def chat(
temperature results in more varied and less predictable text, while a lower
temperature produces more deterministic and conservative outputs.
+ tool_choice: Configure how the model will call functions: `auto` will allow the model to
+ automatically choose the best tool, `none` disables tool calling. You can also
+ pass a specific previously defined function as a string.
+
+ tools: An array of tools described to the model using JSON schema that the model can
+ use to generate responses.
top_p: Sets the threshold for "nucleus sampling," a technique to focus the model's
token generation on the most likely subset of tokens. Only tokens with
cumulative probability above this threshold are considered, controlling the
@@ -486,6 +544,8 @@ async def chat(
stop: Union[List[str], str] | NotGiven = NOT_GIVEN,
stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: chat_chat_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[chat_chat_params.Tool] | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -505,6 +565,8 @@ async def chat(
"stop": stop,
"stream": stream,
"temperature": temperature,
+ "tool_choice": tool_choice,
+ "tools": tools,
"top_p": top_p,
},
chat_chat_params.ChatChatParams,
10 changes: 10 additions & 0 deletions src/writerai/resources/files.py
@@ -83,6 +83,7 @@ def list(
graph_id: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ status: Literal["in_progress", "completed", "failed"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -109,6 +110,9 @@ def list(
order: Specifies the order of the results. Valid values are asc for ascending and desc
for descending.
+ status: Specifies the status of the files to retrieve. Valid values are in_progress,
+ completed or failed.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -132,6 +136,7 @@ def list(
"graph_id": graph_id,
"limit": limit,
"order": order,
+ "status": status,
},
file_list_params.FileListParams,
),
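A sketch of the new `status` filter in use; iterating the return value assumes the usual Stainless auto-paginating list object, and the `id` field on each file is likewise assumed.

```python
# Sketch: listing only files whose processing has completed.
from writerai import Writer

client = Writer()

for file in client.files.list(status="completed", limit=50, order="desc"):
    print(file.id)  # auto-pagination is assumed to fetch further pages lazily
```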
@@ -296,6 +301,7 @@ def list(
graph_id: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ status: Literal["in_progress", "completed", "failed"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -322,6 +328,9 @@ def list(
order: Specifies the order of the results. Valid values are asc for ascending and desc
for descending.
+ status: Specifies the status of the files to retrieve. Valid values are in_progress,
+ completed or failed.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -345,6 +354,7 @@ def list(
"graph_id": graph_id,
"limit": limit,
"order": order,
+ "status": status,
},
file_list_params.FileListParams,
),
24 changes: 21 additions & 3 deletions src/writerai/types/chat.py
@@ -1,11 +1,27 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

- from typing import List
+ from typing import List, Optional
from typing_extensions import Literal

from .._models import BaseModel

- __all__ = ["Chat", "Choice", "ChoiceMessage"]
+ __all__ = ["Chat", "Choice", "ChoiceMessage", "ChoiceMessageToolCall", "ChoiceMessageToolCallFunction"]


+ class ChoiceMessageToolCallFunction(BaseModel):
+     arguments: Optional[str] = None
+
+     name: Optional[str] = None
+
+
+ class ChoiceMessageToolCall(BaseModel):
+     id: Optional[str] = None
+
+     function: Optional[ChoiceMessageToolCallFunction] = None
+
+     index: Optional[int] = None
+
+     type: Optional[str] = None


class ChoiceMessage(BaseModel):
@@ -23,9 +39,11 @@ class ChoiceMessage(BaseModel):
output within the interaction flow.
"""

+ tool_calls: Optional[List[ChoiceMessageToolCall]] = None


class Choice(BaseModel):
- finish_reason: Literal["stop", "length", "content_filter"]
+ finish_reason: Literal["stop", "length", "content_filter", "tool_calls"]
"""Describes the condition under which the model ceased generating content.
Common reasons include 'length' (reached the maximum output size), 'stop'
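Together with the new `tool_calls` finish reason, these models let callers detect and execute tool calls. A sketch follows, assuming `function.arguments` arrives as a JSON-encoded string (its `Optional[str]` type suggests this, but the diff does not say so explicitly); the tool shape is the same illustrative one used in the earlier tools sketch.

```python
# Sketch: acting on a tool call surfaced by the new response models.
import json

from writerai import Writer

client = Writer()

# Minimal illustrative tool description (shape assumed, not confirmed by the diff).
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_stock_price",
            "description": "Return the latest price for a ticker symbol.",
            "parameters": {
                "type": "object",
                "properties": {"ticker": {"type": "string"}},
                "required": ["ticker"],
            },
        },
    }
]

chat = client.chat.chat(
    messages=[{"content": "What is ACME trading at?", "role": "user"}],
    model="palmyra-x-004",
    tools=tools,
    tool_choice="auto",
)

choice = chat.choices[0]
if choice.finish_reason == "tool_calls" and choice.message.tool_calls:
    for call in choice.message.tool_calls:
        if call.function and call.function.name == "get_stock_price":
            args = json.loads(call.function.arguments or "{}")
            print("Model requested the price of:", args.get("ticker"))
```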
(The remaining 7 changed files in this commit are not shown here.)
