diff --git a/client/client.go b/client/client.go
index 9f7ef92..4ab796d 100644
--- a/client/client.go
+++ b/client/client.go
@@ -7,6 +7,7 @@ import (
 	context "context"
 	json "encoding/json"
 	errors "errors"
+	fmt "fmt"
 	v2 "github.com/cohere-ai/cohere-go/v2"
 	connectors "github.com/cohere-ai/cohere-go/v2/connectors"
 	core "github.com/cohere-ai/cohere-go/v2/core"
@@ -73,6 +74,9 @@ func (c *Client) ChatStream(
 	endpointURL := baseURL + "/v1/chat"
 
 	headers := core.MergeHeaders(c.header.Clone(), options.ToHeader())
+	if request.Accepts != nil {
+		headers.Add("Accepts", fmt.Sprintf("%v", request.Accepts))
+	}
 
 	errorDecoder := func(statusCode int, body io.Reader) error {
 		raw, err := io.ReadAll(body)
@@ -197,6 +201,9 @@ func (c *Client) Chat(
 	endpointURL := baseURL + "/v1/chat"
 
 	headers := core.MergeHeaders(c.header.Clone(), options.ToHeader())
+	if request.Accepts != nil {
+		headers.Add("Accepts", fmt.Sprintf("%v", request.Accepts))
+	}
 
 	errorDecoder := func(statusCode int, body io.Reader) error {
 		raw, err := io.ReadAll(body)
diff --git a/core/request_option.go b/core/request_option.go
index f616b59..86d8c80 100644
--- a/core/request_option.go
+++ b/core/request_option.go
@@ -56,7 +56,7 @@ func (r *RequestOptions) cloneHeader() http.Header {
 	headers := r.HTTPHeader.Clone()
 	headers.Set("X-Fern-Language", "Go")
 	headers.Set("X-Fern-SDK-Name", "github.com/cohere-ai/cohere-go/v2")
-	headers.Set("X-Fern-SDK-Version", "v2.11.0")
+	headers.Set("X-Fern-SDK-Version", "v2.11.1")
 	return headers
 }
 
diff --git a/finetuning/types.go b/finetuning/types.go
index f01ac32..ffed78a 100644
--- a/finetuning/types.go
+++ b/finetuning/types.go
@@ -61,7 +61,7 @@ func (b *BaseModel) String() string {
 // The possible types of fine-tuned models.
 //
 //   - BASE_TYPE_UNSPECIFIED: Unspecified model.
-//   - BASE_TYPE_GENERATIVE: Generative model.
+//   - BASE_TYPE_GENERATIVE: Deprecated: Generative model.
 //   - BASE_TYPE_CLASSIFICATION: Classification model.
 //   - BASE_TYPE_RERANK: Rerank model.
 //   - BASE_TYPE_CHAT: Chat model.
@@ -546,7 +546,7 @@ type Settings struct {
 	Hyperparameters *Hyperparameters `json:"hyperparameters,omitempty" url:"hyperparameters,omitempty"`
 	// read-only. Whether the model is single-label or multi-label (only for classification).
 	MultiLabel *bool `json:"multi_label,omitempty" url:"multi_label,omitempty"`
-	// The Weights & Biases configuration.
+	// The Weights & Biases configuration (Chat fine-tuning only).
 	Wandb *WandbConfig `json:"wandb,omitempty" url:"wandb,omitempty"`
 
 	extraProperties map[string]interface{}
@@ -644,7 +644,7 @@ func (s Status) Ptr() *Status {
 // The possible strategy used to serve a fine-tuned models.
 //
 //   - STRATEGY_UNSPECIFIED: Unspecified strategy.
-//   - STRATEGY_VANILLA: Serve the fine-tuned model on a dedicated GPU.
+//   - STRATEGY_VANILLA: Deprecated: Serve the fine-tuned model on a dedicated GPU.
 //   - STRATEGY_TFEW: Serve the fine-tuned model on a shared GPU.
 type Strategy string
 
diff --git a/types.go b/types.go
index 1e21b06..a9cb850 100644
--- a/types.go
+++ b/types.go
@@ -10,6 +10,8 @@ import (
 )
 
 type ChatRequest struct {
+	// Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.
+	Accepts *string `json:"-" url:"-"`
 	// Text input for the model to respond to.
 	//
 	// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
@@ -237,6 +239,8 @@ func (c *ChatRequest) MarshalJSON() ([]byte, error) {
 }
 
 type ChatStreamRequest struct {
+	// Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.
+	Accepts *string `json:"-" url:"-"`
 	// Text input for the model to respond to.
 	//
 	// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
@@ -491,6 +495,10 @@ type DetokenizeRequest struct {
 type EmbedRequest struct {
 	// An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality.
 	Texts []string `json:"texts,omitempty" url:"-"`
+	// An array of image data URIs for the model to embed. Maximum number of images per call is `1`.
+	//
+	// The image must be a valid [data URI](https://developer.mozilla.org/en-US/docs/Web/URI/Schemes/data). The image must be in either `image/jpeg` or `image/png` format and has a maximum size of 5MB.
+	Images []string `json:"images,omitempty" url:"-"`
 	// Defaults to embed-english-v2.0
 	//
 	// The identifier of the model. Smaller "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID.
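Below is a minimal usage sketch of the new `Accepts` and `Images` fields (not part of the diff). It assumes the existing cohere-go v2 entry points `NewClient`, `ChatStream`, and `Embed`; the token environment variable, prompt text, and data-URI placeholder are illustrative only.

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
	"os"

	cohere "github.com/cohere-ai/cohere-go/v2"
	cohereclient "github.com/cohere-ai/cohere-go/v2/client"
)

func main() {
	co := cohereclient.NewClient(cohereclient.WithToken(os.Getenv("CO_API_KEY")))

	// New in this release: request server-sent events instead of the default
	// `\n` delimited events; the value is forwarded as the Accepts header.
	accepts := "text/event-stream"
	stream, err := co.ChatStream(context.TODO(), &cohere.ChatStreamRequest{
		Accepts: &accepts,
		Message: "Tell me a short joke", // illustrative prompt
	})
	if err != nil {
		panic(err)
	}
	defer stream.Close()
	for {
		event, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("%+v\n", event)
	}

	// Also new: embed a single image passed as a data URI (image/jpeg or
	// image/png, at most 5MB). The API may additionally require an embed v3
	// model and an image input type; both are omitted here for brevity.
	embedResp, err := co.Embed(context.TODO(), &cohere.EmbedRequest{
		Images: []string{"data:image/png;base64,..."}, // placeholder data URI
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", embedResp)
}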