audiotranslation.go

// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

package openai

import (
	"bytes"
	"context"
	"io"
	"mime/multipart"
	"net/http"

	"github.com/openai/openai-go/internal/apiform"
	"github.com/openai/openai-go/internal/apijson"
	"github.com/openai/openai-go/internal/param"
	"github.com/openai/openai-go/internal/requestconfig"
	"github.com/openai/openai-go/option"
)

// AudioTranslationService contains methods and other services that help with
// interacting with the openai API.
//
// Note, unlike clients, this service does not read variables from the environment
// automatically. You should not instantiate this service directly; use the
// [NewAudioTranslationService] method instead.
type AudioTranslationService struct {
	Options []option.RequestOption
}

// NewAudioTranslationService generates a new service that applies the given
// options to each request. These options are applied after the parent client's
// options (if there is one), and before any request-specific options.
func NewAudioTranslationService(opts ...option.RequestOption) (r *AudioTranslationService) {
	r = &AudioTranslationService{}
	r.Options = opts
	return
}
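
// An illustrative sketch (not part of the generated file): options passed here
// apply to every request made through the service, while options passed to an
// individual call are appended last. It assumes option.WithMaxRetries and
// option.WithHeader from this module's option package, and a placeholder header:
//
//	svc := NewAudioTranslationService(option.WithMaxRetries(2))
//	// The per-call option below is applied after the service-level option above.
//	res, err := svc.New(ctx, params, option.WithHeader("X-Request-Source", "docs"))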

// Translates audio into English.
func (r *AudioTranslationService) New(ctx context.Context, body AudioTranslationNewParams, opts ...option.RequestOption) (res *Translation, err error) {
	opts = append(r.Options[:], opts...)
	path := "audio/translations"
	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...)
	return
}
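
// An illustrative usage sketch (not part of the generated file): the service is
// normally reached through the top-level client as client.Audio.Translations.
// It assumes NewClient, F, and AudioModelWhisper1 from this package, plus a
// placeholder API key and a local audio file path chosen purely for the example:
//
//	client := NewClient(option.WithAPIKey("my-api-key"))
//	file, err := os.Open("speech.mp3")
//	if err != nil {
//		panic(err)
//	}
//	defer file.Close()
//	translation, err := client.Audio.Translations.New(context.TODO(), AudioTranslationNewParams{
//		File:  F[io.Reader](file),
//		Model: F(AudioModelWhisper1),
//	})
//	if err != nil {
//		panic(err)
//	}
//	fmt.Println(translation.Text)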

type Translation struct {
	Text string          `json:"text,required"`
	JSON translationJSON `json:"-"`
}

// translationJSON contains the JSON metadata for the struct [Translation]
type translationJSON struct {
	Text        apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *Translation) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r translationJSON) RawJSON() string {
	return r.raw
}
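
// An illustrative sketch (not part of the generated file): since Translation
// implements json.Unmarshaler by delegating to apijson, it can be decoded with
// the standard library, and the received payload is kept available through
// JSON.RawJSON(). The sample payload below is invented for the example:
//
//	var t Translation
//	raw := []byte(`{"text":"Hello, world."}`)
//	if err := json.Unmarshal(raw, &t); err != nil {
//		panic(err)
//	}
//	fmt.Println(t.Text)           // "Hello, world."
//	fmt.Println(t.JSON.RawJSON()) // the exact bytes that were decoded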

type AudioTranslationNewParams struct {
	// The audio file object (not file name) to translate, in one of these formats:
	// flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
	File param.Field[io.Reader] `json:"file,required" format:"binary"`
	// ID of the model to use. Only `whisper-1` (which is powered by our open source
	// Whisper V2 model) is currently available.
	Model param.Field[AudioModel] `json:"model,required"`
	// An optional text to guide the model's style or continue a previous audio
	// segment. The
	// [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
	// should be in English.
	Prompt param.Field[string] `json:"prompt"`
	// The format of the output, in one of these options: `json`, `text`, `srt`,
	// `verbose_json`, or `vtt`.
	ResponseFormat param.Field[AudioResponseFormat] `json:"response_format"`
	// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
	// output more random, while lower values like 0.2 will make it more focused and
	// deterministic. If set to 0, the model will use
	// [log probability](https://en.wikipedia.org/wiki/Log_probability) to
	// automatically increase the temperature until certain thresholds are hit.
	Temperature param.Field[float64] `json:"temperature"`
}
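
// An illustrative sketch (not part of the generated file): only File and Model
// are required; optional fields are also wrapped with F and are simply omitted
// from the request when unset. AudioResponseFormatJSON is assumed to be defined
// elsewhere in this package, and the prompt text is invented for the example:
//
//	params := AudioTranslationNewParams{
//		File:           F[io.Reader](file),
//		Model:          F(AudioModelWhisper1),
//		Prompt:         F("Transcript of a technical talk about Go."),
//		ResponseFormat: F(AudioResponseFormatJSON),
//		Temperature:    F(0.2),
//	}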

func (r AudioTranslationNewParams) MarshalMultipart() (data []byte, contentType string, err error) {
	buf := bytes.NewBuffer(nil)
	writer := multipart.NewWriter(buf)
	err = apiform.MarshalRoot(r, writer)
	if err != nil {
		writer.Close()
		return nil, "", err
	}
	err = writer.Close()
	if err != nil {
		return nil, "", err
	}
	return buf.Bytes(), writer.FormDataContentType(), nil
}
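
// An illustrative sketch (not part of the generated file): the request layer
// uses MarshalMultipart to encode these params as a multipart/form-data body,
// but it can also be called directly to inspect what would be sent:
//
//	body, contentType, err := params.MarshalMultipart()
//	if err != nil {
//		panic(err)
//	}
//	fmt.Println(contentType)             // multipart/form-data; boundary=...
//	fmt.Println(len(body), "body bytes")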