From c329601324226e28ff18d6ccecfdde41cedd3b5a Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Thu, 1 Aug 2024 00:02:01 +0000 Subject: [PATCH] feat: publish --- .devcontainer/Dockerfile | 23 + .devcontainer/devcontainer.json | 20 + .github/workflows/ci.yml | 41 + .github/workflows/create-releases.yml | 29 + .gitignore | 4 + .release-please-manifest.json | 3 + .stats.yml | 2 + CONTRIBUTING.md | 59 + LICENSE | 201 ++ README.md | 408 ++++ SECURITY.md | 29 + aliases.go | 30 + api.md | 406 ++++ audio.go | 32 + audiospeech.go | 123 ++ audiospeech_test.go | 56 + audiotranscription.go | 164 ++ audiotranscription_test.go | 46 + audiotranslation.go | 116 ++ audiotranslation_test.go | 44 + batch.go | 374 ++++ batch_test.go | 112 ++ beta.go | 32 + betaassistant.go | 2393 +++++++++++++++++++++++ betaassistant_test.go | 188 ++ betathread.go | 1498 ++++++++++++++ betathread_test.go | 399 ++++ betathreadmessage.go | 2031 +++++++++++++++++++ betathreadmessage_test.go | 186 ++ betathreadrun.go | 1039 ++++++++++ betathreadrun_test.go | 312 +++ betathreadrunstep.go | 1800 +++++++++++++++++ betathreadrunstep_test.go | 73 + betavectorstore.go | 569 ++++++ betavectorstore_test.go | 151 ++ betavectorstorefile.go | 670 +++++++ betavectorstorefile_test.go | 129 ++ betavectorstorefilebatch.go | 415 ++++ betavectorstorefilebatch_test.go | 130 ++ chat.go | 64 + chatcompletion.go | 1546 +++++++++++++++ chatcompletion_test.go | 103 + client.go | 135 ++ client_test.go | 206 ++ completion.go | 383 ++++ completion_test.go | 59 + embedding.go | 248 +++ embedding_test.go | 43 + examples/.keep | 4 + field.go | 50 + file.go | 337 ++++ file_test.go | 145 ++ finetuning.go | 28 + finetuningjob.go | 749 +++++++ finetuningjob_test.go | 170 ++ finetuningjobcheckpoint.go | 171 ++ finetuningjobcheckpoint_test.go | 43 + go.mod | 11 + go.sum | 12 + image.go | 421 ++++ image_test.go | 107 + internal/apierror/apierror.go | 61 + internal/apiform/encoder.go | 381 ++++ internal/apiform/form.go | 5 + 
internal/apiform/form_test.go | 440 +++++ internal/apiform/tag.go | 48 + internal/apijson/decoder.go | 668 +++++++ internal/apijson/encoder.go | 391 ++++ internal/apijson/field.go | 41 + internal/apijson/field_test.go | 66 + internal/apijson/json_test.go | 554 ++++++ internal/apijson/port.go | 107 + internal/apijson/port_test.go | 178 ++ internal/apijson/registry.go | 27 + internal/apijson/tag.go | 47 + internal/apiquery/encoder.go | 341 ++++ internal/apiquery/query.go | 50 + internal/apiquery/query_test.go | 335 ++++ internal/apiquery/tag.go | 41 + internal/pagination/pagination.go | 206 ++ internal/param/field.go | 29 + internal/requestconfig/requestconfig.go | 487 +++++ internal/testutil/testutil.go | 27 + internal/version.go | 5 + lib/.keep | 4 + model.go | 155 ++ model_test.go | 80 + moderation.go | 259 +++ moderation_test.go | 40 + option/requestoption.go | 253 +++ packages/ssestream/streaming.go | 168 ++ paginationauto_test.go | 38 + paginationmanual_test.go | 46 + release-please-config.json | 67 + scripts/bootstrap | 16 + scripts/format | 8 + scripts/lint | 8 + scripts/mock | 41 + scripts/test | 56 + shared/shared.go | 93 + shared/union.go | 26 + upload.go | 234 +++ upload_test.go | 92 + uploadpart.go | 126 ++ uploadpart_test.go | 44 + usage_test.go | 38 + 106 files changed, 25799 insertions(+) create mode 100644 .devcontainer/Dockerfile create mode 100644 .devcontainer/devcontainer.json create mode 100644 .github/workflows/ci.yml create mode 100644 .github/workflows/create-releases.yml create mode 100644 .gitignore create mode 100644 .release-please-manifest.json create mode 100644 .stats.yml create mode 100644 CONTRIBUTING.md create mode 100644 LICENSE create mode 100644 README.md create mode 100644 SECURITY.md create mode 100644 aliases.go create mode 100644 api.md create mode 100644 audio.go create mode 100644 audiospeech.go create mode 100644 audiospeech_test.go create mode 100644 audiotranscription.go create mode 100644 audiotranscription_test.go 
create mode 100644 audiotranslation.go create mode 100644 audiotranslation_test.go create mode 100644 batch.go create mode 100644 batch_test.go create mode 100644 beta.go create mode 100644 betaassistant.go create mode 100644 betaassistant_test.go create mode 100644 betathread.go create mode 100644 betathread_test.go create mode 100644 betathreadmessage.go create mode 100644 betathreadmessage_test.go create mode 100644 betathreadrun.go create mode 100644 betathreadrun_test.go create mode 100644 betathreadrunstep.go create mode 100644 betathreadrunstep_test.go create mode 100644 betavectorstore.go create mode 100644 betavectorstore_test.go create mode 100644 betavectorstorefile.go create mode 100644 betavectorstorefile_test.go create mode 100644 betavectorstorefilebatch.go create mode 100644 betavectorstorefilebatch_test.go create mode 100644 chat.go create mode 100644 chatcompletion.go create mode 100644 chatcompletion_test.go create mode 100644 client.go create mode 100644 client_test.go create mode 100644 completion.go create mode 100644 completion_test.go create mode 100644 embedding.go create mode 100644 embedding_test.go create mode 100644 examples/.keep create mode 100644 field.go create mode 100644 file.go create mode 100644 file_test.go create mode 100644 finetuning.go create mode 100644 finetuningjob.go create mode 100644 finetuningjob_test.go create mode 100644 finetuningjobcheckpoint.go create mode 100644 finetuningjobcheckpoint_test.go create mode 100644 go.mod create mode 100644 go.sum create mode 100644 image.go create mode 100644 image_test.go create mode 100644 internal/apierror/apierror.go create mode 100644 internal/apiform/encoder.go create mode 100644 internal/apiform/form.go create mode 100644 internal/apiform/form_test.go create mode 100644 internal/apiform/tag.go create mode 100644 internal/apijson/decoder.go create mode 100644 internal/apijson/encoder.go create mode 100644 internal/apijson/field.go create mode 100644 
internal/apijson/field_test.go create mode 100644 internal/apijson/json_test.go create mode 100644 internal/apijson/port.go create mode 100644 internal/apijson/port_test.go create mode 100644 internal/apijson/registry.go create mode 100644 internal/apijson/tag.go create mode 100644 internal/apiquery/encoder.go create mode 100644 internal/apiquery/query.go create mode 100644 internal/apiquery/query_test.go create mode 100644 internal/apiquery/tag.go create mode 100644 internal/pagination/pagination.go create mode 100644 internal/param/field.go create mode 100644 internal/requestconfig/requestconfig.go create mode 100644 internal/testutil/testutil.go create mode 100644 internal/version.go create mode 100644 lib/.keep create mode 100644 model.go create mode 100644 model_test.go create mode 100644 moderation.go create mode 100644 moderation_test.go create mode 100644 option/requestoption.go create mode 100644 packages/ssestream/streaming.go create mode 100644 paginationauto_test.go create mode 100644 paginationmanual_test.go create mode 100644 release-please-config.json create mode 100755 scripts/bootstrap create mode 100755 scripts/format create mode 100755 scripts/lint create mode 100755 scripts/mock create mode 100755 scripts/test create mode 100644 shared/shared.go create mode 100644 shared/union.go create mode 100644 upload.go create mode 100644 upload_test.go create mode 100644 uploadpart.go create mode 100644 uploadpart_test.go create mode 100644 usage_test.go diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 0000000..1aa883d --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,23 @@ +# syntax=docker/dockerfile:1 +FROM debian:bookworm-slim + +RUN apt-get update && apt-get install -y \ + libxkbcommon0 \ + ca-certificates \ + git \ + golang \ + unzip \ + libc++1 \ + vim \ + && apt-get clean autoclean + +# Ensure UTF-8 encoding +ENV LANG=C.UTF-8 +ENV LC_ALL=C.UTF-8 + +ENV GOPATH=/go +ENV PATH=$GOPATH/bin:$PATH + +WORKDIR 
/workspace + +COPY . /workspace diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000..d55fc4d --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,20 @@ +// For format details, see https://aka.ms/devcontainer.json. For config options, see the +// README at: https://github.com/devcontainers/templates/tree/main/src/debian +{ + "name": "Debian", + "build": { + "dockerfile": "Dockerfile" + } + + // Features to add to the dev container. More info: https://containers.dev/features. + // "features": {}, + + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [], + + // Configure tool-specific properties. + // "customizations": {}, + + // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. + // "remoteUser": "root" +} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..dc9085d --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,41 @@ +name: CI +on: + push: + branches: + - main + pull_request: + branches: + - main + - next + +jobs: + lint: + name: lint + runs-on: ubuntu-latest + if: github.repository == 'openai/openai-go' + + steps: + - uses: actions/checkout@v4 + + - name: Setup go + uses: actions/setup-go@v5 + + - name: Run lints + run: ./scripts/lint + test: + name: test + runs-on: ubuntu-latest + if: github.repository == 'openai/openai-go' + + steps: + - uses: actions/checkout@v4 + + - name: Setup go + uses: actions/setup-go@v5 + + - name: Bootstrap + run: ./scripts/bootstrap + + - name: Run tests + run: ./scripts/test + diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml new file mode 100644 index 0000000..069decb --- /dev/null +++ b/.github/workflows/create-releases.yml @@ -0,0 +1,29 @@ +name: Create releases +on: + schedule: + - cron: '0 5 * * *' # every day at 5am UTC + push: + branches: + - main + +jobs: + release: 
+ name: release + if: github.ref == 'refs/heads/main' && github.repository == 'openai/openai-go' + runs-on: ubuntu-latest + environment: publish + + steps: + - uses: actions/checkout@v4 + + - uses: stainless-api/trigger-release-please@v1 + id: release + with: + repo: ${{ github.event.repository.full_name }} + stainless-api-key: ${{ secrets.STAINLESS_API_KEY }} + + - name: Generate godocs + if: ${{ steps.release.outputs.releases_created }} + run: | + version=$(jq -r '. | to_entries[0] | .value' .release-please-manifest.json) + curl -X POST https://pkg.go.dev/fetch/github.com/openai/openai-go@v${version} diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..c6d0501 --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +.prism.log +codegen.log +Brewfile.lock.json +.idea/ diff --git a/.release-please-manifest.json b/.release-please-manifest.json new file mode 100644 index 0000000..c476280 --- /dev/null +++ b/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + ".": "0.0.1-alpha.0" +} \ No newline at end of file diff --git a/.stats.yml b/.stats.yml new file mode 100644 index 0000000..4e4cb55 --- /dev/null +++ b/.stats.yml @@ -0,0 +1,2 @@ +configured_endpoints: 68 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-77cfff37114bc9f141c7e6107eb5f1b38d8cc99bc3d4ce03a066db2b6b649c69.yml diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..8541a3a --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,59 @@ +## Setting up the environment + +### Install Go 1.18+ + +Install go by following relevant directions [here](https://go.dev/doc/install). + +## Modifying/Adding code + +Most of the SDK is generated code, and any modified code will be overridden on the next generation. The +`examples/` directory is an exception and will never be overridden. + +## Adding and running examples + +All files in the `examples/` directory are not modified by the Stainless generator and can be freely edited or +added to. 
+ +```bash +# add an example to examples//main.go + +package main + +func main() { + // ... +} +``` + +```bash +go run ./examples/ +``` + +## Using the repository from source + +To use a local version of this library from source in another project, edit the `go.mod` with a replace +directive. This can be done through the CLI with the following: + +```bash +go mod edit -replace github.com/openai/openai-go=/path/to/openai-go +``` + +## Running tests + +Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests. + +```bash +# you will need npm installed +npx prism mock path/to/your/openapi.yml +``` + +```bash +go test ./... +``` + +## Formatting + +This library uses the standard gofmt code formatter: + +```bash +gofmt -s -w . +``` diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..621a6be --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2024 OpenAI + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 0000000..8746450 --- /dev/null +++ b/README.md @@ -0,0 +1,408 @@ +# OpenAI Go API Library + +Go Reference + +The OpenAI Go library provides convenient access to [the OpenAI REST +API](https://platform.openai.com/docs) from applications written in Go. The full API of this library can be found in [api.md](api.md). + +## Installation + + + +```go +import ( + "github.com/openai/openai-go" // imported as openai +) +``` + + + +Or to pin the version: + + + +```sh +go get -u 'github.com/openai/openai-go@v0.0.1-alpha.0' +``` + + + +## Requirements + +This library requires Go 1.18+. + +## Usage + +The full API of this library can be found in [api.md](api.md). 
+ +```go +package main + +import ( + "context" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/option" + "github.com/openai/openai-go/shared" +) + +func main() { + client := openai.NewClient( + option.WithAPIKey("My API Key"), // defaults to os.LookupEnv("OPENAI_API_KEY") + ) + chatCompletion, err := client.Chat.Completions.New(context.TODO(), openai.ChatCompletionNewParams{ + Messages: openai.F([]openai.ChatCompletionMessageParamUnion{openai.ChatCompletionUserMessageParam{ + Role: openai.F(openai.ChatCompletionUserMessageParamRoleUser), + Content: openai.F[openai.ChatCompletionUserMessageParamContentUnion](shared.UnionString("Say this is a test")), + }}), + Model: openai.F(openai.ChatModelGPT4o), + }) + if err != nil { + panic(err.Error()) + } +} + +``` + +### Request fields + +All request parameters are wrapped in a generic `Field` type, +which we use to distinguish zero values from null or omitted fields. + +This prevents accidentally sending a zero value if you forget a required parameter, +and enables explicitly sending `null`, `false`, `''`, or `0` on optional parameters. +Any field not specified is not sent. + +To construct fields with values, use the helpers `String()`, `Int()`, `Float()`, or most commonly, the generic `F[T]()`. +To send a null, use `Null[T]()`, and to send a nonconforming value, use `Raw[T](any)`. For example: + +```go +params := FooParams{ + Name: openai.F("hello"), + + // Explicitly send `"description": null` + Description: openai.Null[string](), + + Point: openai.F(openai.Point{ + X: openai.Int(0), + Y: openai.Int(1), + + // In cases where the API specifies a given type, + // but you want to send something else, use `Raw`: + Z: openai.Raw[int64](0.01), // sends a float + }), +} +``` + +### Response objects + +All fields in response structs are value types (not pointers or wrappers). + +If a given field is `null`, not present, or invalid, the corresponding field +will simply be its zero value. 
+ +All response structs also include a special `JSON` field, containing more detailed +information about each property, which you can use like so: + +```go +if res.Name == "" { + // true if `"name"` is either not present or explicitly null + res.JSON.Name.IsNull() + + // true if the `"name"` key was not present in the repsonse JSON at all + res.JSON.Name.IsMissing() + + // When the API returns data that cannot be coerced to the expected type: + if res.JSON.Name.IsInvalid() { + raw := res.JSON.Name.Raw() + + legacyName := struct{ + First string `json:"first"` + Last string `json:"last"` + }{} + json.Unmarshal([]byte(raw), &legacyName) + name = legacyName.First + " " + legacyName.Last + } +} +``` + +These `.JSON` structs also include an `Extras` map containing +any properties in the json response that were not specified +in the struct. This can be useful for API features not yet +present in the SDK. + +```go +body := res.JSON.ExtraFields["my_unexpected_field"].Raw() +``` + +### RequestOptions + +This library uses the functional options pattern. Functions defined in the +`option` package return a `RequestOption`, which is a closure that mutates a +`RequestConfig`. These options can be supplied to the client or at individual +requests. For example: + +```go +client := openai.NewClient( + // Adds a header to every request made by the client + option.WithHeader("X-Some-Header", "custom_header_info"), +) + +client.Chat.Completions.New(context.TODO(), ..., + // Override the header + option.WithHeader("X-Some-Header", "some_other_custom_header_info"), + // Add an undocumented field to the request body, using sjson syntax + option.WithJSONSet("some.json.path", map[string]string{"my": "object"}), +) +``` + +See the [full list of request options](https://pkg.go.dev/github.com/openai/openai-go/option). + +### Pagination + +This library provides some conveniences for working with paginated list endpoints. 
+ +You can use `.ListAutoPaging()` methods to iterate through items across all pages: + +```go +iter := client.FineTuning.Jobs.ListAutoPaging(context.TODO(), openai.FineTuningJobListParams{ + Limit: openai.F(int64(20)), +}) +// Automatically fetches more pages as needed. +for iter.Next() { + fineTuningJob := iter.Current() + fmt.Printf("%+v\n", fineTuningJob) +} +if err := iter.Err(); err != nil { + panic(err.Error()) +} +``` + +Or you can use simple `.List()` methods to fetch a single page and receive a standard response object +with additional helper methods like `.GetNextPage()`, e.g.: + +```go +page, err := client.FineTuning.Jobs.List(context.TODO(), openai.FineTuningJobListParams{ + Limit: openai.F(int64(20)), +}) +for page != nil { + for _, job := range page.Data { + fmt.Printf("%+v\n", job) + } + page, err = page.GetNextPage() +} +if err != nil { + panic(err.Error()) +} +``` + +### Errors + +When the API returns a non-success status code, we return an error with type +`*openai.Error`. This contains the `StatusCode`, `*http.Request`, and +`*http.Response` values of the request, as well as the JSON of the error body +(much like other response objects in the SDK). + +To handle errors, we recommend that you use the `errors.As` pattern: + +```go +_, err := client.FineTuning.Jobs.New(context.TODO(), openai.FineTuningJobNewParams{ + Model: openai.F(openai.FineTuningJobNewParamsModelBabbage002), + TrainingFile: openai.F("file-abc123"), +}) +if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + println(string(apierr.DumpRequest(true))) // Prints the serialized HTTP request + println(string(apierr.DumpResponse(true))) // Prints the serialized HTTP response + } + panic(err.Error()) // GET "/fine_tuning/jobs": 400 Bad Request { ... } +} +``` + +When other errors occur, they are returned unwrapped; for example, +if HTTP transport fails, you might receive `*url.Error` wrapping `*net.OpError`. 
+ +### Timeouts + +Requests do not time out by default; use context to configure a timeout for a request lifecycle. + +Note that if a request is [retried](#retries), the context timeout does not start over. +To set a per-retry timeout, use `option.WithRequestTimeout()`. + +```go +// This sets the timeout for the request, including all the retries. +ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) +defer cancel() +client.Chat.Completions.New( + ctx, + openai.ChatCompletionNewParams{ + Messages: openai.F([]openai.ChatCompletionMessageParamUnion{openai.ChatCompletionUserMessageParam{ + Role: openai.F(openai.ChatCompletionUserMessageParamRoleUser), + Content: openai.F[openai.ChatCompletionUserMessageParamContentUnion](shared.UnionString("How can I list all files in a directory using Python?")), + }}), + Model: openai.F(openai.ChatModelGPT4o), + }, + // This sets the per-retry timeout + option.WithRequestTimeout(20*time.Second), +) +``` + +### File uploads + +Request parameters that correspond to file uploads in multipart requests are typed as +`param.Field[io.Reader]`. The contents of the `io.Reader` will by default be sent as a multipart form +part with the file name of "anonymous_file" and content-type of "application/octet-stream". + +The file name and content-type can be customized by implementing `Name() string` or `ContentType() +string` on the run-time type of `io.Reader`. Note that `os.File` implements `Name() string`, so a +file returned by `os.Open` will be sent with the file name on disk. + +We also provide a helper `openai.FileParam(reader io.Reader, filename string, contentType string)` +which can be used to wrap any `io.Reader` with the appropriate file name and content type. 
+ +```go +// A file from the file system +file, err := os.Open("input.jsonl") +openai.FileNewParams{ + File: openai.F[io.Reader](file), + Purpose: openai.F(openai.FileNewParamsPurposeFineTune), +} + +// A file from a string +openai.FileNewParams{ + File: openai.F[io.Reader](strings.NewReader("my file contents")), + Purpose: openai.F(openai.FileNewParamsPurposeFineTune), +} + +// With a custom filename and contentType +openai.FileNewParams{ + File: openai.FileParam(strings.NewReader(`{"hello": "foo"}`), "file.go", "application/json"), + Purpose: openai.F(openai.FileNewParamsPurposeFineTune), +} +``` + +### Retries + +Certain errors will be automatically retried 2 times by default, with a short exponential backoff. +We retry by default all connection errors, 408 Request Timeout, 409 Conflict, 429 Rate Limit, +and >=500 Internal errors. + +You can use the `WithMaxRetries` option to configure or disable this: + +```go +// Configure the default for all requests: +client := openai.NewClient( + option.WithMaxRetries(0), // default is 2 +) + +// Override per-request: +client.Chat.Completions.New( + context.TODO(), + openai.ChatCompletionNewParams{ + Messages: openai.F([]openai.ChatCompletionMessageParamUnion{openai.ChatCompletionUserMessageParam{ + Role: openai.F(openai.ChatCompletionUserMessageParamRoleUser), + Content: openai.F[openai.ChatCompletionUserMessageParamContentUnion](shared.UnionString("How can I get the name of the current day in Node.js?")), + }}), + Model: openai.F(openai.ChatModelGPT4o), + }, + option.WithMaxRetries(5), +) +``` + +### Making custom/undocumented requests + +This library is typed for convenient access to the documented API. If you need to access undocumented +endpoints, params, or response properties, the library can still be used. + +#### Undocumented endpoints + +To make requests to undocumented endpoints, you can use `client.Get`, `client.Post`, and other HTTP verbs. 
+`RequestOptions` on the client, such as retries, will be respected when making these requests. + +```go +var ( + // params can be an io.Reader, a []byte, an encoding/json serializable object, + // or a "…Params" struct defined in this library. + params map[string]interface{} + + // result can be an []byte, *http.Response, a encoding/json deserializable object, + // or a model defined in this library. + result *http.Response +) +err := client.Post(context.Background(), "/unspecified", params, &result) +if err != nil { + … +} +``` + +#### Undocumented request params + +To make requests using undocumented parameters, you may use either the `option.WithQuerySet()` +or the `option.WithJSONSet()` methods. + +```go +params := FooNewParams{ + ID: openai.F("id_xxxx"), + Data: openai.F(FooNewParamsData{ + FirstName: openai.F("John"), + }), +} +client.Foo.New(context.Background(), params, option.WithJSONSet("data.last_name", "Doe")) +``` + +#### Undocumented response properties + +To access undocumented response properties, you may either access the raw JSON of the response as a string +with `result.JSON.RawJSON()`, or get the raw JSON of a particular field on the result with +`result.JSON.Foo.Raw()`. + +Any fields that are not present on the response struct will be saved and can be accessed by `result.JSON.ExtraFields()` which returns the extra fields as a `map[string]Field`. + +### Middleware + +We provide `option.WithMiddleware` which applies the given +middleware to requests. 
+ +```go +func Logger(req *http.Request, next option.MiddlewareNext) (res *http.Response, err error) { + // Before the request + start := time.Now() + LogReq(req) + + // Forward the request to the next handler + res, err = next(req) + + // Handle stuff after the request + end := time.Now() + LogRes(res, err, end.Sub(start)) + + return res, err +} + +client := openai.NewClient( + option.WithMiddleware(Logger), +) +``` + +When multiple middlewares are provided as variadic arguments, the middlewares +are applied left to right. If `option.WithMiddleware` is given +multiple times, for example first in the client then the method, the +middleware in the client will run first and the middleware given in the method +will run next. + +You may also replace the default `http.Client` with +`option.WithHTTPClient(client)`. Only one http client is +accepted (this overwrites any previous client) and receives requests after any +middleware has been applied. + +## Semantic versioning + +This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions: + +1. Changes to library internals which are technically public but not intended or documented for external use. _(Please open a GitHub issue to let us know if you are relying on such internals)_. +2. Changes that we do not expect to impact the vast majority of users in practice. + +We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience. + +We are keen for your feedback; please open an [issue](https://www.github.com/openai/openai-go/issues) with questions, bugs, or suggestions. diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..c54acaf --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,29 @@ +# Security Policy + +## Reporting Security Issues + +This SDK is generated by [Stainless Software Inc](http://stainlessapi.com).
Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken. + +To report a security issue, please contact the Stainless team at security@stainlessapi.com. + +## Responsible Disclosure + +We appreciate the efforts of security researchers and individuals who help us maintain the security of +SDKs we generate. If you believe you have found a security vulnerability, please adhere to responsible +disclosure practices by allowing us a reasonable amount of time to investigate and address the issue +before making any information public. + +## Reporting Non-SDK Related Security Issues + +If you encounter security issues that are not directly related to SDKs but pertain to the services +or products provided by OpenAI, please follow the respective company's security reporting guidelines. + +### OpenAI Terms and Policies + +Our Security Policy can be found at [Security Policy URL](https://openai.com/policies/coordinated-vulnerability-disclosure-policy). + +Please contact disclosure@openai.com for any questions or concerns regarding security of our services. + +--- + +Thank you for helping us keep the SDKs and systems they interact with secure. diff --git a/aliases.go b/aliases.go new file mode 100644 index 0000000..b84f85f --- /dev/null +++ b/aliases.go @@ -0,0 +1,30 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai + +import ( + "github.com/openai/openai-go/internal/apierror" + "github.com/openai/openai-go/shared" +) + +type Error = apierror.Error + +// This is an alias to an internal type. +type ErrorObject = shared.ErrorObject + +// This is an alias to an internal type. +type FunctionDefinition = shared.FunctionDefinition + +// This is an alias to an internal type. +type FunctionDefinitionParam = shared.FunctionDefinitionParam + +// The parameters the function accepts, described as a JSON Schema object.
See the +// [guide](https://platform.openai.com/docs/guides/function-calling) for examples, +// and the +// [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for +// documentation about the format. +// +// Omitting `parameters` defines a function with an empty parameter list. +// +// This is an alias to an internal type. +type FunctionParameters = shared.FunctionParameters diff --git a/api.md b/api.md new file mode 100644 index 0000000..6ee3cc5 --- /dev/null +++ b/api.md @@ -0,0 +1,406 @@ +# Shared Params Types + +- shared.FunctionDefinitionParam +- shared.FunctionParameters + +# Shared Response Types + +- shared.ErrorObject +- shared.FunctionDefinition +- shared.FunctionParameters + +# Completions + +Response Types: + +- openai.Completion +- openai.CompletionChoice +- openai.CompletionUsage + +Methods: + +- client.Completions.New(ctx context.Context, body openai.CompletionNewParams) (openai.Completion, error) + +# Chat + +Params Types: + +- openai.ChatModel + +## Completions + +Params Types: + +- openai.ChatCompletionAssistantMessageParam +- openai.ChatCompletionContentPartUnionParam +- openai.ChatCompletionContentPartImageParam +- openai.ChatCompletionContentPartTextParam +- openai.ChatCompletionFunctionCallOptionParam +- openai.ChatCompletionFunctionMessageParam +- openai.ChatCompletionMessageParamUnion +- openai.ChatCompletionMessageToolCallParam +- openai.ChatCompletionNamedToolChoiceParam +- openai.ChatCompletionStreamOptionsParam +- openai.ChatCompletionSystemMessageParam +- openai.ChatCompletionToolParam +- openai.ChatCompletionToolChoiceOptionUnionParam +- openai.ChatCompletionToolMessageParam +- openai.ChatCompletionUserMessageParam + +Response Types: + +- openai.ChatCompletion +- openai.ChatCompletionChunk +- openai.ChatCompletionMessage +- openai.ChatCompletionMessageToolCall +- openai.ChatCompletionTokenLogprob + +Methods: + +- client.Chat.Completions.New(ctx context.Context, body openai.ChatCompletionNewParams) 
(openai.ChatCompletion, error) + +# Embeddings + +Response Types: + +- openai.CreateEmbeddingResponse +- openai.Embedding + +Methods: + +- client.Embeddings.New(ctx context.Context, body openai.EmbeddingNewParams) (openai.CreateEmbeddingResponse, error) + +# Files + +Response Types: + +- openai.FileDeleted +- openai.FileObject + +Methods: + +- client.Files.New(ctx context.Context, body openai.FileNewParams) (openai.FileObject, error) +- client.Files.Get(ctx context.Context, fileID string) (openai.FileObject, error) +- client.Files.List(ctx context.Context, query openai.FileListParams) (pagination.Page[openai.FileObject], error) +- client.Files.Delete(ctx context.Context, fileID string) (openai.FileDeleted, error) +- client.Files.Content(ctx context.Context, fileID string) (http.Response, error) + +# Images + +Response Types: + +- openai.Image +- openai.ImagesResponse + +Methods: + +- client.Images.NewVariation(ctx context.Context, body openai.ImageNewVariationParams) (openai.ImagesResponse, error) +- client.Images.Edit(ctx context.Context, body openai.ImageEditParams) (openai.ImagesResponse, error) +- client.Images.Generate(ctx context.Context, body openai.ImageGenerateParams) (openai.ImagesResponse, error) + +# Audio + +## Transcriptions + +Response Types: + +- openai.Transcription + +Methods: + +- client.Audio.Transcriptions.New(ctx context.Context, body openai.AudioTranscriptionNewParams) (openai.Transcription, error) + +## Translations + +Response Types: + +- openai.Translation + +Methods: + +- client.Audio.Translations.New(ctx context.Context, body openai.AudioTranslationNewParams) (openai.Translation, error) + +## Speech + +Methods: + +- client.Audio.Speech.New(ctx context.Context, body openai.AudioSpeechNewParams) (http.Response, error) + +# Moderations + +Response Types: + +- openai.Moderation +- openai.ModerationNewResponse + +Methods: + +- client.Moderations.New(ctx context.Context, body openai.ModerationNewParams) (openai.ModerationNewResponse, error) + 
+# Models + +Response Types: + +- openai.Model +- openai.ModelDeleted + +Methods: + +- client.Models.Get(ctx context.Context, model string) (openai.Model, error) +- client.Models.List(ctx context.Context) (pagination.Page[openai.Model], error) +- client.Models.Delete(ctx context.Context, model string) (openai.ModelDeleted, error) + +# FineTuning + +## Jobs + +Response Types: + +- openai.FineTuningJob +- openai.FineTuningJobEvent +- openai.FineTuningJobWandbIntegration +- openai.FineTuningJobWandbIntegrationObject + +Methods: + +- client.FineTuning.Jobs.New(ctx context.Context, body openai.FineTuningJobNewParams) (openai.FineTuningJob, error) +- client.FineTuning.Jobs.Get(ctx context.Context, fineTuningJobID string) (openai.FineTuningJob, error) +- client.FineTuning.Jobs.List(ctx context.Context, query openai.FineTuningJobListParams) (pagination.CursorPage[openai.FineTuningJob], error) +- client.FineTuning.Jobs.Cancel(ctx context.Context, fineTuningJobID string) (openai.FineTuningJob, error) +- client.FineTuning.Jobs.ListEvents(ctx context.Context, fineTuningJobID string, query openai.FineTuningJobListEventsParams) (pagination.CursorPage[openai.FineTuningJobEvent], error) + +### Checkpoints + +Response Types: + +- openai.FineTuningJobCheckpoint + +Methods: + +- client.FineTuning.Jobs.Checkpoints.List(ctx context.Context, fineTuningJobID string, query openai.FineTuningJobCheckpointListParams) (pagination.CursorPage[openai.FineTuningJobCheckpoint], error) + +# Beta + +## VectorStores + +Response Types: + +- openai.VectorStore +- openai.VectorStoreDeleted + +Methods: + +- client.Beta.VectorStores.New(ctx context.Context, body openai.BetaVectorStoreNewParams) (openai.VectorStore, error) +- client.Beta.VectorStores.Get(ctx context.Context, vectorStoreID string) (openai.VectorStore, error) +- client.Beta.VectorStores.Update(ctx context.Context, vectorStoreID string, body openai.BetaVectorStoreUpdateParams) (openai.VectorStore, error) +- client.Beta.VectorStores.List(ctx 
context.Context, query openai.BetaVectorStoreListParams) (pagination.CursorPage[openai.VectorStore], error) +- client.Beta.VectorStores.Delete(ctx context.Context, vectorStoreID string) (openai.VectorStoreDeleted, error) + +### Files + +Response Types: + +- openai.VectorStoreFile +- openai.VectorStoreFileDeleted + +Methods: + +- client.Beta.VectorStores.Files.New(ctx context.Context, vectorStoreID string, body openai.BetaVectorStoreFileNewParams) (openai.VectorStoreFile, error) +- client.Beta.VectorStores.Files.Get(ctx context.Context, vectorStoreID string, fileID string) (openai.VectorStoreFile, error) +- client.Beta.VectorStores.Files.List(ctx context.Context, vectorStoreID string, query openai.BetaVectorStoreFileListParams) (pagination.CursorPage[openai.VectorStoreFile], error) +- client.Beta.VectorStores.Files.Delete(ctx context.Context, vectorStoreID string, fileID string) (openai.VectorStoreFileDeleted, error) + +### FileBatches + +Response Types: + +- openai.VectorStoreFileBatch + +Methods: + +- client.Beta.VectorStores.FileBatches.New(ctx context.Context, vectorStoreID string, body openai.BetaVectorStoreFileBatchNewParams) (openai.VectorStoreFileBatch, error) +- client.Beta.VectorStores.FileBatches.Get(ctx context.Context, vectorStoreID string, batchID string) (openai.VectorStoreFileBatch, error) +- client.Beta.VectorStores.FileBatches.Cancel(ctx context.Context, vectorStoreID string, batchID string) (openai.VectorStoreFileBatch, error) +- client.Beta.VectorStores.FileBatches.ListFiles(ctx context.Context, vectorStoreID string, batchID string, query openai.BetaVectorStoreFileBatchListFilesParams) (pagination.CursorPage[openai.VectorStoreFile], error) + +## Assistants + +Params Types: + +- openai.AssistantToolUnionParam +- openai.CodeInterpreterToolParam +- openai.FileSearchToolParam +- openai.FunctionToolParam + +Response Types: + +- openai.Assistant +- openai.AssistantDeleted +- openai.AssistantStreamEvent +- openai.AssistantTool +- 
openai.CodeInterpreterTool +- openai.FileSearchTool +- openai.FunctionTool + +Methods: + +- client.Beta.Assistants.New(ctx context.Context, body openai.BetaAssistantNewParams) (openai.Assistant, error) +- client.Beta.Assistants.Get(ctx context.Context, assistantID string) (openai.Assistant, error) +- client.Beta.Assistants.Update(ctx context.Context, assistantID string, body openai.BetaAssistantUpdateParams) (openai.Assistant, error) +- client.Beta.Assistants.List(ctx context.Context, query openai.BetaAssistantListParams) (pagination.CursorPage[openai.Assistant], error) +- client.Beta.Assistants.Delete(ctx context.Context, assistantID string) (openai.AssistantDeleted, error) + +## Threads + +Params Types: + +- openai.AssistantResponseFormatParam +- openai.AssistantResponseFormatOptionUnionParam +- openai.AssistantToolChoiceParam +- openai.AssistantToolChoiceFunctionParam +- openai.AssistantToolChoiceOptionUnionParam + +Response Types: + +- openai.AssistantResponseFormat +- openai.AssistantResponseFormatOptionUnion +- openai.AssistantToolChoice +- openai.AssistantToolChoiceFunction +- openai.AssistantToolChoiceOptionUnion +- openai.Thread +- openai.ThreadDeleted + +Methods: + +- client.Beta.Threads.New(ctx context.Context, body openai.BetaThreadNewParams) (openai.Thread, error) +- client.Beta.Threads.Get(ctx context.Context, threadID string) (openai.Thread, error) +- client.Beta.Threads.Update(ctx context.Context, threadID string, body openai.BetaThreadUpdateParams) (openai.Thread, error) +- client.Beta.Threads.Delete(ctx context.Context, threadID string) (openai.ThreadDeleted, error) +- client.Beta.Threads.NewAndRun(ctx context.Context, body openai.BetaThreadNewAndRunParams) (openai.Run, error) + +### Runs + +Response Types: + +- openai.RequiredActionFunctionToolCall +- openai.Run +- openai.RunStatus + +Methods: + +- client.Beta.Threads.Runs.New(ctx context.Context, threadID string, body openai.BetaThreadRunNewParams) (openai.Run, error) +- 
client.Beta.Threads.Runs.Get(ctx context.Context, threadID string, runID string) (openai.Run, error) +- client.Beta.Threads.Runs.Update(ctx context.Context, threadID string, runID string, body openai.BetaThreadRunUpdateParams) (openai.Run, error) +- client.Beta.Threads.Runs.List(ctx context.Context, threadID string, query openai.BetaThreadRunListParams) (pagination.CursorPage[openai.Run], error) +- client.Beta.Threads.Runs.Cancel(ctx context.Context, threadID string, runID string) (openai.Run, error) +- client.Beta.Threads.Runs.SubmitToolOutputs(ctx context.Context, threadID string, runID string, body openai.BetaThreadRunSubmitToolOutputsParams) (openai.Run, error) + +#### Steps + +Response Types: + +- openai.CodeInterpreterLogs +- openai.CodeInterpreterOutputImage +- openai.CodeInterpreterToolCall +- openai.CodeInterpreterToolCallDelta +- openai.FileSearchToolCall +- openai.FileSearchToolCallDelta +- openai.FunctionToolCall +- openai.FunctionToolCallDelta +- openai.MessageCreationStepDetails +- openai.RunStep +- openai.RunStepDelta +- openai.RunStepDeltaEvent +- openai.RunStepDeltaMessageDelta +- openai.ToolCall +- openai.ToolCallDelta +- openai.ToolCallDeltaObject +- openai.ToolCallsStepDetails + +Methods: + +- client.Beta.Threads.Runs.Steps.Get(ctx context.Context, threadID string, runID string, stepID string) (openai.RunStep, error) +- client.Beta.Threads.Runs.Steps.List(ctx context.Context, threadID string, runID string, query openai.BetaThreadRunStepListParams) (pagination.CursorPage[openai.RunStep], error) + +### Messages + +Params Types: + +- openai.ImageFileParam +- openai.ImageFileContentBlockParam +- openai.ImageURLParam +- openai.ImageURLContentBlockParam +- openai.MessageContentPartParamUnion +- openai.TextContentBlockParam + +Response Types: + +- openai.Annotation +- openai.AnnotationDelta +- openai.FileCitationAnnotation +- openai.FileCitationDeltaAnnotation +- openai.FilePathAnnotation +- openai.FilePathDeltaAnnotation +- openai.ImageFile +- 
openai.ImageFileContentBlock +- openai.ImageFileDelta +- openai.ImageFileDeltaBlock +- openai.ImageURL +- openai.ImageURLContentBlock +- openai.ImageURLDelta +- openai.ImageURLDeltaBlock +- openai.Message +- openai.MessageContent +- openai.MessageContentDelta +- openai.MessageDeleted +- openai.MessageDelta +- openai.MessageDeltaEvent +- openai.Text +- openai.TextContentBlock +- openai.TextDelta +- openai.TextDeltaBlock + +Methods: + +- client.Beta.Threads.Messages.New(ctx context.Context, threadID string, body openai.BetaThreadMessageNewParams) (openai.Message, error) +- client.Beta.Threads.Messages.Get(ctx context.Context, threadID string, messageID string) (openai.Message, error) +- client.Beta.Threads.Messages.Update(ctx context.Context, threadID string, messageID string, body openai.BetaThreadMessageUpdateParams) (openai.Message, error) +- client.Beta.Threads.Messages.List(ctx context.Context, threadID string, query openai.BetaThreadMessageListParams) (pagination.CursorPage[openai.Message], error) +- client.Beta.Threads.Messages.Delete(ctx context.Context, threadID string, messageID string) (openai.MessageDeleted, error) + +# Batches + +Response Types: + +- openai.Batch +- openai.BatchError +- openai.BatchRequestCounts + +Methods: + +- client.Batches.New(ctx context.Context, body openai.BatchNewParams) (openai.Batch, error) +- client.Batches.Get(ctx context.Context, batchID string) (openai.Batch, error) +- client.Batches.List(ctx context.Context, query openai.BatchListParams) (pagination.CursorPage[openai.Batch], error) +- client.Batches.Cancel(ctx context.Context, batchID string) (openai.Batch, error) + +# Uploads + +Response Types: + +- openai.Upload + +Methods: + +- client.Uploads.New(ctx context.Context, body openai.UploadNewParams) (openai.Upload, error) +- client.Uploads.Cancel(ctx context.Context, uploadID string) (openai.Upload, error) +- client.Uploads.Complete(ctx context.Context, uploadID string, body openai.UploadCompleteParams) (openai.Upload, 
error) + +## Parts + +Response Types: + +- openai.UploadPart + +Methods: + +- client.Uploads.Parts.New(ctx context.Context, uploadID string, body openai.UploadPartNewParams) (openai.UploadPart, error) diff --git a/audio.go b/audio.go new file mode 100644 index 0000000..9ea966d --- /dev/null +++ b/audio.go @@ -0,0 +1,32 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai + +import ( + "github.com/openai/openai-go/option" +) + +// AudioService contains methods and other services that help with interacting with +// the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewAudioService] method instead. +type AudioService struct { + Options []option.RequestOption + Transcriptions *AudioTranscriptionService + Translations *AudioTranslationService + Speech *AudioSpeechService +} + +// NewAudioService generates a new service that applies the given options to each +// request. These options are applied after the parent client's options (if there +// is one), and before any request-specific options. +func NewAudioService(opts ...option.RequestOption) (r *AudioService) { + r = &AudioService{} + r.Options = opts + r.Transcriptions = NewAudioTranscriptionService(opts...) + r.Translations = NewAudioTranslationService(opts...) + r.Speech = NewAudioSpeechService(opts...) + return +} diff --git a/audiospeech.go b/audiospeech.go new file mode 100644 index 0000000..1412ea1 --- /dev/null +++ b/audiospeech.go @@ -0,0 +1,123 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package openai + +import ( + "context" + "net/http" + + "github.com/openai/openai-go/internal/apijson" + "github.com/openai/openai-go/internal/param" + "github.com/openai/openai-go/internal/requestconfig" + "github.com/openai/openai-go/option" +) + +// AudioSpeechService contains methods and other services that help with +// interacting with the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewAudioSpeechService] method instead. +type AudioSpeechService struct { + Options []option.RequestOption +} + +// NewAudioSpeechService generates a new service that applies the given options to +// each request. These options are applied after the parent client's options (if +// there is one), and before any request-specific options. +func NewAudioSpeechService(opts ...option.RequestOption) (r *AudioSpeechService) { + r = &AudioSpeechService{} + r.Options = opts + return +} + +// Generates audio from the input text. +func (r *AudioSpeechService) New(ctx context.Context, body AudioSpeechNewParams, opts ...option.RequestOption) (res *http.Response, err error) { + opts = append(r.Options[:], opts...) + opts = append([]option.RequestOption{option.WithHeader("Accept", "application/octet-stream")}, opts...) + path := "audio/speech" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +type AudioSpeechNewParams struct { + // The text to generate audio for. The maximum length is 4096 characters. + Input param.Field[string] `json:"input,required"` + // One of the available [TTS models](https://platform.openai.com/docs/models/tts): + // `tts-1` or `tts-1-hd` + Model param.Field[AudioSpeechNewParamsModel] `json:"model,required"` + // The voice to use when generating the audio. Supported voices are `alloy`, + // `echo`, `fable`, `onyx`, `nova`, and `shimmer`. 
Previews of the voices are + available in the + [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options). + Voice param.Field[AudioSpeechNewParamsVoice] `json:"voice,required"` + // The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, + // `wav`, and `pcm`. + ResponseFormat param.Field[AudioSpeechNewParamsResponseFormat] `json:"response_format"` + // The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is + // the default. + Speed param.Field[float64] `json:"speed"` +} + +func (r AudioSpeechNewParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type AudioSpeechNewParamsModel string + +const ( + AudioSpeechNewParamsModelTTS1 AudioSpeechNewParamsModel = "tts-1" + AudioSpeechNewParamsModelTTS1HD AudioSpeechNewParamsModel = "tts-1-hd" +) + +func (r AudioSpeechNewParamsModel) IsKnown() bool { + switch r { + case AudioSpeechNewParamsModelTTS1, AudioSpeechNewParamsModelTTS1HD: + return true + } + return false +} + +// The voice to use when generating the audio. Supported voices are `alloy`, +// `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are +// available in the +// [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options).
+type AudioSpeechNewParamsVoice string + +const ( + AudioSpeechNewParamsVoiceAlloy AudioSpeechNewParamsVoice = "alloy" + AudioSpeechNewParamsVoiceEcho AudioSpeechNewParamsVoice = "echo" + AudioSpeechNewParamsVoiceFable AudioSpeechNewParamsVoice = "fable" + AudioSpeechNewParamsVoiceOnyx AudioSpeechNewParamsVoice = "onyx" + AudioSpeechNewParamsVoiceNova AudioSpeechNewParamsVoice = "nova" + AudioSpeechNewParamsVoiceShimmer AudioSpeechNewParamsVoice = "shimmer" +) + +func (r AudioSpeechNewParamsVoice) IsKnown() bool { + switch r { + case AudioSpeechNewParamsVoiceAlloy, AudioSpeechNewParamsVoiceEcho, AudioSpeechNewParamsVoiceFable, AudioSpeechNewParamsVoiceOnyx, AudioSpeechNewParamsVoiceNova, AudioSpeechNewParamsVoiceShimmer: + return true + } + return false +} + +// The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, +// `wav`, and `pcm`. +type AudioSpeechNewParamsResponseFormat string + +const ( + AudioSpeechNewParamsResponseFormatMP3 AudioSpeechNewParamsResponseFormat = "mp3" + AudioSpeechNewParamsResponseFormatOpus AudioSpeechNewParamsResponseFormat = "opus" + AudioSpeechNewParamsResponseFormatAAC AudioSpeechNewParamsResponseFormat = "aac" + AudioSpeechNewParamsResponseFormatFLAC AudioSpeechNewParamsResponseFormat = "flac" + AudioSpeechNewParamsResponseFormatWAV AudioSpeechNewParamsResponseFormat = "wav" + AudioSpeechNewParamsResponseFormatPCM AudioSpeechNewParamsResponseFormat = "pcm" +) + +func (r AudioSpeechNewParamsResponseFormat) IsKnown() bool { + switch r { + case AudioSpeechNewParamsResponseFormatMP3, AudioSpeechNewParamsResponseFormatOpus, AudioSpeechNewParamsResponseFormatAAC, AudioSpeechNewParamsResponseFormatFLAC, AudioSpeechNewParamsResponseFormatWAV, AudioSpeechNewParamsResponseFormatPCM: + return true + } + return false +} diff --git a/audiospeech_test.go b/audiospeech_test.go new file mode 100644 index 0000000..14f334a --- /dev/null +++ b/audiospeech_test.go @@ -0,0 +1,56 @@ +// File generated from our OpenAPI spec by
Stainless. See CONTRIBUTING.md for details. + +package openai_test + +import ( + "bytes" + "context" + "errors" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/option" +) + +func TestAudioSpeechNewWithOptionalParams(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + w.Write([]byte("abc")) + })) + defer server.Close() + baseURL := server.URL + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + resp, err := client.Audio.Speech.New(context.TODO(), openai.AudioSpeechNewParams{ + Input: openai.F("input"), + Model: openai.F(openai.AudioSpeechNewParamsModelTTS1), + Voice: openai.F(openai.AudioSpeechNewParamsVoiceAlloy), + ResponseFormat: openai.F(openai.AudioSpeechNewParamsResponseFormatMP3), + Speed: openai.F(0.250000), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } + defer resp.Body.Close() + + b, err := io.ReadAll(resp.Body) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } + if !bytes.Equal(b, []byte("abc")) { + t.Fatalf("return value not %s: %s", "abc", b) + } +} diff --git a/audiotranscription.go b/audiotranscription.go new file mode 100644 index 0000000..df5f27e --- /dev/null +++ b/audiotranscription.go @@ -0,0 +1,164 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package openai + +import ( + "bytes" + "context" + "io" + "mime/multipart" + "net/http" + + "github.com/openai/openai-go/internal/apiform" + "github.com/openai/openai-go/internal/apijson" + "github.com/openai/openai-go/internal/param" + "github.com/openai/openai-go/internal/requestconfig" + "github.com/openai/openai-go/option" +) + +// AudioTranscriptionService contains methods and other services that help with +// interacting with the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewAudioTranscriptionService] method instead. +type AudioTranscriptionService struct { + Options []option.RequestOption +} + +// NewAudioTranscriptionService generates a new service that applies the given +// options to each request. These options are applied after the parent client's +// options (if there is one), and before any request-specific options. +func NewAudioTranscriptionService(opts ...option.RequestOption) (r *AudioTranscriptionService) { + r = &AudioTranscriptionService{} + r.Options = opts + return +} + +// Transcribes audio into the input language. +func (r *AudioTranscriptionService) New(ctx context.Context, body AudioTranscriptionNewParams, opts ...option.RequestOption) (res *Transcription, err error) { + opts = append(r.Options[:], opts...) + path := "audio/transcriptions" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Represents a transcription response returned by model, based on the provided +// input. +type Transcription struct { + // The transcribed text. 
+ Text string `json:"text,required"` + JSON transcriptionJSON `json:"-"` +} + +// transcriptionJSON contains the JSON metadata for the struct [Transcription] +type transcriptionJSON struct { + Text apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *Transcription) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r transcriptionJSON) RawJSON() string { + return r.raw +} + +type AudioTranscriptionNewParams struct { + // The audio file object (not file name) to transcribe, in one of these formats: + // flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + File param.Field[io.Reader] `json:"file,required" format:"binary"` + // ID of the model to use. Only `whisper-1` (which is powered by our open source + // Whisper V2 model) is currently available. + Model param.Field[AudioTranscriptionNewParamsModel] `json:"model,required"` + // The language of the input audio. Supplying the input language in + // [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will + // improve accuracy and latency. + Language param.Field[string] `json:"language"` + // An optional text to guide the model's style or continue a previous audio + // segment. The + // [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) + // should match the audio language. + Prompt param.Field[string] `json:"prompt"` + // The format of the transcript output, in one of these options: `json`, `text`, + // `srt`, `verbose_json`, or `vtt`. + ResponseFormat param.Field[AudioTranscriptionNewParamsResponseFormat] `json:"response_format"` + // The sampling temperature, between 0 and 1. Higher values like 0.8 will make the + // output more random, while lower values like 0.2 will make it more focused and + // deterministic. If set to 0, the model will use + // [log probability](https://en.wikipedia.org/wiki/Log_probability) to + // automatically increase the temperature until certain thresholds are hit. 
+ Temperature param.Field[float64] `json:"temperature"` + // The timestamp granularities to populate for this transcription. + // `response_format` must be set `verbose_json` to use timestamp granularities. + // Either or both of these options are supported: `word`, or `segment`. Note: There + // is no additional latency for segment timestamps, but generating word timestamps + // incurs additional latency. + TimestampGranularities param.Field[[]AudioTranscriptionNewParamsTimestampGranularity] `json:"timestamp_granularities"` +} + +func (r AudioTranscriptionNewParams) MarshalMultipart() (data []byte, contentType string, err error) { + buf := bytes.NewBuffer(nil) + writer := multipart.NewWriter(buf) + err = apiform.MarshalRoot(r, writer) + if err != nil { + writer.Close() + return nil, "", err + } + err = writer.Close() + if err != nil { + return nil, "", err + } + return buf.Bytes(), writer.FormDataContentType(), nil +} + +type AudioTranscriptionNewParamsModel string + +const ( + AudioTranscriptionNewParamsModelWhisper1 AudioTranscriptionNewParamsModel = "whisper-1" +) + +func (r AudioTranscriptionNewParamsModel) IsKnown() bool { + switch r { + case AudioTranscriptionNewParamsModelWhisper1: + return true + } + return false +} + +// The format of the transcript output, in one of these options: `json`, `text`, +// `srt`, `verbose_json`, or `vtt`. 
+type AudioTranscriptionNewParamsResponseFormat string + +const ( + AudioTranscriptionNewParamsResponseFormatJSON AudioTranscriptionNewParamsResponseFormat = "json" + AudioTranscriptionNewParamsResponseFormatText AudioTranscriptionNewParamsResponseFormat = "text" + AudioTranscriptionNewParamsResponseFormatSRT AudioTranscriptionNewParamsResponseFormat = "srt" + AudioTranscriptionNewParamsResponseFormatVerboseJSON AudioTranscriptionNewParamsResponseFormat = "verbose_json" + AudioTranscriptionNewParamsResponseFormatVTT AudioTranscriptionNewParamsResponseFormat = "vtt" +) + +func (r AudioTranscriptionNewParamsResponseFormat) IsKnown() bool { + switch r { + case AudioTranscriptionNewParamsResponseFormatJSON, AudioTranscriptionNewParamsResponseFormatText, AudioTranscriptionNewParamsResponseFormatSRT, AudioTranscriptionNewParamsResponseFormatVerboseJSON, AudioTranscriptionNewParamsResponseFormatVTT: + return true + } + return false +} + +type AudioTranscriptionNewParamsTimestampGranularity string + +const ( + AudioTranscriptionNewParamsTimestampGranularityWord AudioTranscriptionNewParamsTimestampGranularity = "word" + AudioTranscriptionNewParamsTimestampGranularitySegment AudioTranscriptionNewParamsTimestampGranularity = "segment" +) + +func (r AudioTranscriptionNewParamsTimestampGranularity) IsKnown() bool { + switch r { + case AudioTranscriptionNewParamsTimestampGranularityWord, AudioTranscriptionNewParamsTimestampGranularitySegment: + return true + } + return false +} diff --git a/audiotranscription_test.go b/audiotranscription_test.go new file mode 100644 index 0000000..55ebbc5 --- /dev/null +++ b/audiotranscription_test.go @@ -0,0 +1,46 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package openai_test + +import ( + "bytes" + "context" + "errors" + "io" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" +) + +func TestAudioTranscriptionNewWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Audio.Transcriptions.New(context.TODO(), openai.AudioTranscriptionNewParams{ + File: openai.F(io.Reader(bytes.NewBuffer([]byte("some file contents")))), + Model: openai.F(openai.AudioTranscriptionNewParamsModelWhisper1), + Language: openai.F("language"), + Prompt: openai.F("prompt"), + ResponseFormat: openai.F(openai.AudioTranscriptionNewParamsResponseFormatJSON), + Temperature: openai.F(0.000000), + TimestampGranularities: openai.F([]openai.AudioTranscriptionNewParamsTimestampGranularity{openai.AudioTranscriptionNewParamsTimestampGranularityWord, openai.AudioTranscriptionNewParamsTimestampGranularitySegment}), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/audiotranslation.go b/audiotranslation.go new file mode 100644 index 0000000..b50733c --- /dev/null +++ b/audiotranslation.go @@ -0,0 +1,116 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+
+package openai
+
+import (
+ "bytes"
+ "context"
+ "io"
+ "mime/multipart"
+ "net/http"
+
+ "github.com/openai/openai-go/internal/apiform"
+ "github.com/openai/openai-go/internal/apijson"
+ "github.com/openai/openai-go/internal/param"
+ "github.com/openai/openai-go/internal/requestconfig"
+ "github.com/openai/openai-go/option"
+)
+
+// AudioTranslationService contains methods and other services that help with
+// interacting with the openai API.
+//
+// Note, unlike clients, this service does not read variables from the environment
+// automatically. You should not instantiate this service directly; instead, use
+// the [NewAudioTranslationService] method.
+type AudioTranslationService struct {
+ Options []option.RequestOption
+}
+
+// NewAudioTranslationService generates a new service that applies the given
+// options to each request. These options are applied after the parent client's
+// options (if there is one), and before any request-specific options.
+func NewAudioTranslationService(opts ...option.RequestOption) (r *AudioTranslationService) {
+ r = &AudioTranslationService{}
+ r.Options = opts
+ return
+}
+
+// Translates audio into English.
+func (r *AudioTranslationService) New(ctx context.Context, body AudioTranslationNewParams, opts ...option.RequestOption) (res *Translation, err error) {
+ opts = append(r.Options[:], opts...)
+ path := "audio/translations"
+ err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...)
+ return
+}
+
+type Translation struct {
+ Text string `json:"text,required"`
+ JSON translationJSON `json:"-"`
+}
+
+// translationJSON contains the JSON metadata for the struct [Translation]
+type translationJSON struct {
+ Text apijson.Field
+ raw string
+ ExtraFields map[string]apijson.Field
+}
+
+func (r *Translation) UnmarshalJSON(data []byte) (err error) {
+ return apijson.UnmarshalRoot(data, r)
+}
+
+func (r translationJSON) RawJSON() string {
+ return r.raw
+}
+
+type AudioTranslationNewParams struct {
+ // The audio file object (not file name) to translate, in one of these formats:
+ // flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+ File param.Field[io.Reader] `json:"file,required" format:"binary"`
+ // ID of the model to use. Only `whisper-1` (which is powered by our open source
+ // Whisper V2 model) is currently available.
+ Model param.Field[AudioTranslationNewParamsModel] `json:"model,required"`
+ // An optional text to guide the model's style or continue a previous audio
+ // segment. The
+ // [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting)
+ // should be in English.
+ Prompt param.Field[string] `json:"prompt"`
+ // The format of the transcript output, in one of these options: `json`, `text`,
+ // `srt`, `verbose_json`, or `vtt`.
+ ResponseFormat param.Field[string] `json:"response_format"`
+ // The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
+ // output more random, while lower values like 0.2 will make it more focused and
+ // deterministic. If set to 0, the model will use
+ // [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+ // automatically increase the temperature until certain thresholds are hit.
+ Temperature param.Field[float64] `json:"temperature"` +} + +func (r AudioTranslationNewParams) MarshalMultipart() (data []byte, contentType string, err error) { + buf := bytes.NewBuffer(nil) + writer := multipart.NewWriter(buf) + err = apiform.MarshalRoot(r, writer) + if err != nil { + writer.Close() + return nil, "", err + } + err = writer.Close() + if err != nil { + return nil, "", err + } + return buf.Bytes(), writer.FormDataContentType(), nil +} + +type AudioTranslationNewParamsModel string + +const ( + AudioTranslationNewParamsModelWhisper1 AudioTranslationNewParamsModel = "whisper-1" +) + +func (r AudioTranslationNewParamsModel) IsKnown() bool { + switch r { + case AudioTranslationNewParamsModelWhisper1: + return true + } + return false +} diff --git a/audiotranslation_test.go b/audiotranslation_test.go new file mode 100644 index 0000000..e5634aa --- /dev/null +++ b/audiotranslation_test.go @@ -0,0 +1,44 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package openai_test + +import ( + "bytes" + "context" + "errors" + "io" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" +) + +func TestAudioTranslationNewWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Audio.Translations.New(context.TODO(), openai.AudioTranslationNewParams{ + File: openai.F(io.Reader(bytes.NewBuffer([]byte("some file contents")))), + Model: openai.F(openai.AudioTranslationNewParamsModelWhisper1), + Prompt: openai.F("prompt"), + ResponseFormat: openai.F("response_format"), + Temperature: openai.F(0.000000), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/batch.go b/batch.go new file mode 100644 index 0000000..8b7da8f --- /dev/null +++ b/batch.go @@ -0,0 +1,374 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + + "github.com/openai/openai-go/internal/apijson" + "github.com/openai/openai-go/internal/apiquery" + "github.com/openai/openai-go/internal/pagination" + "github.com/openai/openai-go/internal/param" + "github.com/openai/openai-go/internal/requestconfig" + "github.com/openai/openai-go/option" +) + +// BatchService contains methods and other services that help with interacting with +// the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. 
You should not instantiate this service directly, and instead use +// the [NewBatchService] method instead. +type BatchService struct { + Options []option.RequestOption +} + +// NewBatchService generates a new service that applies the given options to each +// request. These options are applied after the parent client's options (if there +// is one), and before any request-specific options. +func NewBatchService(opts ...option.RequestOption) (r *BatchService) { + r = &BatchService{} + r.Options = opts + return +} + +// Creates and executes a batch from an uploaded file of requests +func (r *BatchService) New(ctx context.Context, body BatchNewParams, opts ...option.RequestOption) (res *Batch, err error) { + opts = append(r.Options[:], opts...) + path := "batches" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Retrieves a batch. +func (r *BatchService) Get(ctx context.Context, batchID string, opts ...option.RequestOption) (res *Batch, err error) { + opts = append(r.Options[:], opts...) + if batchID == "" { + err = errors.New("missing required batch_id parameter") + return + } + path := fmt.Sprintf("batches/%s", batchID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +// List your organization's batches. +func (r *BatchService) List(ctx context.Context, query BatchListParams, opts ...option.RequestOption) (res *pagination.CursorPage[Batch], err error) { + var raw *http.Response + opts = append(r.Options[:], opts...) + opts = append([]option.RequestOption{option.WithResponseInto(&raw)}, opts...) + path := "batches" + cfg, err := requestconfig.NewRequestConfig(ctx, http.MethodGet, path, query, &res, opts...) + if err != nil { + return nil, err + } + err = cfg.Execute() + if err != nil { + return nil, err + } + res.SetPageConfig(cfg, raw) + return res, nil +} + +// List your organization's batches. 
+func (r *BatchService) ListAutoPaging(ctx context.Context, query BatchListParams, opts ...option.RequestOption) *pagination.CursorPageAutoPager[Batch] { + return pagination.NewCursorPageAutoPager(r.List(ctx, query, opts...)) +} + +// Cancels an in-progress batch. The batch will be in status `cancelling` for up to +// 10 minutes, before changing to `cancelled`, where it will have partial results +// (if any) available in the output file. +func (r *BatchService) Cancel(ctx context.Context, batchID string, opts ...option.RequestOption) (res *Batch, err error) { + opts = append(r.Options[:], opts...) + if batchID == "" { + err = errors.New("missing required batch_id parameter") + return + } + path := fmt.Sprintf("batches/%s/cancel", batchID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, nil, &res, opts...) + return +} + +type Batch struct { + ID string `json:"id,required"` + // The time frame within which the batch should be processed. + CompletionWindow string `json:"completion_window,required"` + // The Unix timestamp (in seconds) for when the batch was created. + CreatedAt int64 `json:"created_at,required"` + // The OpenAI API endpoint used by the batch. + Endpoint string `json:"endpoint,required"` + // The ID of the input file for the batch. + InputFileID string `json:"input_file_id,required"` + // The object type, which is always `batch`. + Object BatchObject `json:"object,required"` + // The current status of the batch. + Status BatchStatus `json:"status,required"` + // The Unix timestamp (in seconds) for when the batch was cancelled. + CancelledAt int64 `json:"cancelled_at"` + // The Unix timestamp (in seconds) for when the batch started cancelling. + CancellingAt int64 `json:"cancelling_at"` + // The Unix timestamp (in seconds) for when the batch was completed. + CompletedAt int64 `json:"completed_at"` + // The ID of the file containing the outputs of requests with errors. 
+ ErrorFileID string `json:"error_file_id"`
+ Errors BatchErrors `json:"errors"`
+ // The Unix timestamp (in seconds) for when the batch expired.
+ ExpiredAt int64 `json:"expired_at"`
+ // The Unix timestamp (in seconds) for when the batch will expire.
+ ExpiresAt int64 `json:"expires_at"`
+ // The Unix timestamp (in seconds) for when the batch failed.
+ FailedAt int64 `json:"failed_at"`
+ // The Unix timestamp (in seconds) for when the batch started finalizing.
+ FinalizingAt int64 `json:"finalizing_at"`
+ // The Unix timestamp (in seconds) for when the batch started processing.
+ InProgressAt int64 `json:"in_progress_at"`
+ // Set of 16 key-value pairs that can be attached to an object. This can be useful
+ // for storing additional information about the object in a structured format. Keys
+ // can be a maximum of 64 characters long and values can be a maximum of 512
+ // characters long.
+ Metadata interface{} `json:"metadata,nullable"`
+ // The ID of the file containing the outputs of successfully executed requests.
+ OutputFileID string `json:"output_file_id"`
+ // The request counts for different statuses within the batch.
+ RequestCounts BatchRequestCounts `json:"request_counts"` + JSON batchJSON `json:"-"` +} + +// batchJSON contains the JSON metadata for the struct [Batch] +type batchJSON struct { + ID apijson.Field + CompletionWindow apijson.Field + CreatedAt apijson.Field + Endpoint apijson.Field + InputFileID apijson.Field + Object apijson.Field + Status apijson.Field + CancelledAt apijson.Field + CancellingAt apijson.Field + CompletedAt apijson.Field + ErrorFileID apijson.Field + Errors apijson.Field + ExpiredAt apijson.Field + ExpiresAt apijson.Field + FailedAt apijson.Field + FinalizingAt apijson.Field + InProgressAt apijson.Field + Metadata apijson.Field + OutputFileID apijson.Field + RequestCounts apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *Batch) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r batchJSON) RawJSON() string { + return r.raw +} + +// The object type, which is always `batch`. +type BatchObject string + +const ( + BatchObjectBatch BatchObject = "batch" +) + +func (r BatchObject) IsKnown() bool { + switch r { + case BatchObjectBatch: + return true + } + return false +} + +// The current status of the batch. 
+type BatchStatus string + +const ( + BatchStatusValidating BatchStatus = "validating" + BatchStatusFailed BatchStatus = "failed" + BatchStatusInProgress BatchStatus = "in_progress" + BatchStatusFinalizing BatchStatus = "finalizing" + BatchStatusCompleted BatchStatus = "completed" + BatchStatusExpired BatchStatus = "expired" + BatchStatusCancelling BatchStatus = "cancelling" + BatchStatusCancelled BatchStatus = "cancelled" +) + +func (r BatchStatus) IsKnown() bool { + switch r { + case BatchStatusValidating, BatchStatusFailed, BatchStatusInProgress, BatchStatusFinalizing, BatchStatusCompleted, BatchStatusExpired, BatchStatusCancelling, BatchStatusCancelled: + return true + } + return false +} + +type BatchErrors struct { + Data []BatchError `json:"data"` + // The object type, which is always `list`. + Object string `json:"object"` + JSON batchErrorsJSON `json:"-"` +} + +// batchErrorsJSON contains the JSON metadata for the struct [BatchErrors] +type batchErrorsJSON struct { + Data apijson.Field + Object apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *BatchErrors) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r batchErrorsJSON) RawJSON() string { + return r.raw +} + +type BatchError struct { + // An error code identifying the error type. + Code string `json:"code"` + // The line number of the input file where the error occurred, if applicable. + Line int64 `json:"line,nullable"` + // A human-readable message providing more details about the error. + Message string `json:"message"` + // The name of the parameter that caused the error, if applicable. 
+ Param string `json:"param,nullable"` + JSON batchErrorJSON `json:"-"` +} + +// batchErrorJSON contains the JSON metadata for the struct [BatchError] +type batchErrorJSON struct { + Code apijson.Field + Line apijson.Field + Message apijson.Field + Param apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *BatchError) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r batchErrorJSON) RawJSON() string { + return r.raw +} + +// The request counts for different statuses within the batch. +type BatchRequestCounts struct { + // Number of requests that have been completed successfully. + Completed int64 `json:"completed,required"` + // Number of requests that have failed. + Failed int64 `json:"failed,required"` + // Total number of requests in the batch. + Total int64 `json:"total,required"` + JSON batchRequestCountsJSON `json:"-"` +} + +// batchRequestCountsJSON contains the JSON metadata for the struct +// [BatchRequestCounts] +type batchRequestCountsJSON struct { + Completed apijson.Field + Failed apijson.Field + Total apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *BatchRequestCounts) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r batchRequestCountsJSON) RawJSON() string { + return r.raw +} + +type BatchNewParams struct { + // The time frame within which the batch should be processed. Currently only `24h` + // is supported. + CompletionWindow param.Field[BatchNewParamsCompletionWindow] `json:"completion_window,required"` + // The endpoint to be used for all requests in the batch. Currently + // `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. + // Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 + // embedding inputs across all requests in the batch. 
+ Endpoint param.Field[BatchNewParamsEndpoint] `json:"endpoint,required"` + // The ID of an uploaded file that contains requests for the new batch. + // + // See [upload file](https://platform.openai.com/docs/api-reference/files/create) + // for how to upload a file. + // + // Your input file must be formatted as a + // [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), + // and must be uploaded with the purpose `batch`. The file can contain up to 50,000 + // requests, and can be up to 100 MB in size. + InputFileID param.Field[string] `json:"input_file_id,required"` + // Optional custom metadata for the batch. + Metadata param.Field[map[string]string] `json:"metadata"` +} + +func (r BatchNewParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// The time frame within which the batch should be processed. Currently only `24h` +// is supported. +type BatchNewParamsCompletionWindow string + +const ( + BatchNewParamsCompletionWindow24h BatchNewParamsCompletionWindow = "24h" +) + +func (r BatchNewParamsCompletionWindow) IsKnown() bool { + switch r { + case BatchNewParamsCompletionWindow24h: + return true + } + return false +} + +// The endpoint to be used for all requests in the batch. Currently +// `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. +// Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 +// embedding inputs across all requests in the batch. 
+type BatchNewParamsEndpoint string + +const ( + BatchNewParamsEndpointV1ChatCompletions BatchNewParamsEndpoint = "/v1/chat/completions" + BatchNewParamsEndpointV1Embeddings BatchNewParamsEndpoint = "/v1/embeddings" + BatchNewParamsEndpointV1Completions BatchNewParamsEndpoint = "/v1/completions" +) + +func (r BatchNewParamsEndpoint) IsKnown() bool { + switch r { + case BatchNewParamsEndpointV1ChatCompletions, BatchNewParamsEndpointV1Embeddings, BatchNewParamsEndpointV1Completions: + return true + } + return false +} + +type BatchListParams struct { + // A cursor for use in pagination. `after` is an object ID that defines your place + // in the list. For instance, if you make a list request and receive 100 objects, + // ending with obj_foo, your subsequent call can include after=obj_foo in order to + // fetch the next page of the list. + After param.Field[string] `query:"after"` + // A limit on the number of objects to be returned. Limit can range between 1 and + // 100, and the default is 20. + Limit param.Field[int64] `query:"limit"` +} + +// URLQuery serializes [BatchListParams]'s query parameters as `url.Values`. +func (r BatchListParams) URLQuery() (v url.Values) { + return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ + ArrayFormat: apiquery.ArrayQueryFormatComma, + NestedFormat: apiquery.NestedQueryFormatBrackets, + }) +} diff --git a/batch_test.go b/batch_test.go new file mode 100644 index 0000000..86272d5 --- /dev/null +++ b/batch_test.go @@ -0,0 +1,112 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package openai_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" +) + +func TestBatchNewWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Batches.New(context.TODO(), openai.BatchNewParams{ + CompletionWindow: openai.F(openai.BatchNewParamsCompletionWindow24h), + Endpoint: openai.F(openai.BatchNewParamsEndpointV1ChatCompletions), + InputFileID: openai.F("input_file_id"), + Metadata: openai.F(map[string]string{ + "foo": "string", + }), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBatchGet(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Batches.Get(context.TODO(), "batch_id") + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBatchListWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Batches.List(context.TODO(), openai.BatchListParams{ + 
After: openai.F("after"), + Limit: openai.F(int64(0)), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBatchCancel(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Batches.Cancel(context.TODO(), "batch_id") + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/beta.go b/beta.go new file mode 100644 index 0000000..b5c4db0 --- /dev/null +++ b/beta.go @@ -0,0 +1,32 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai + +import ( + "github.com/openai/openai-go/option" +) + +// BetaService contains methods and other services that help with interacting with +// the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewBetaService] method instead. +type BetaService struct { + Options []option.RequestOption + VectorStores *BetaVectorStoreService + Assistants *BetaAssistantService + Threads *BetaThreadService +} + +// NewBetaService generates a new service that applies the given options to each +// request. These options are applied after the parent client's options (if there +// is one), and before any request-specific options. +func NewBetaService(opts ...option.RequestOption) (r *BetaService) { + r = &BetaService{} + r.Options = opts + r.VectorStores = NewBetaVectorStoreService(opts...) + r.Assistants = NewBetaAssistantService(opts...) 
+ r.Threads = NewBetaThreadService(opts...) + return +} diff --git a/betaassistant.go b/betaassistant.go new file mode 100644 index 0000000..f6a4b76 --- /dev/null +++ b/betaassistant.go @@ -0,0 +1,2393 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "reflect" + + "github.com/openai/openai-go/internal/apijson" + "github.com/openai/openai-go/internal/apiquery" + "github.com/openai/openai-go/internal/pagination" + "github.com/openai/openai-go/internal/param" + "github.com/openai/openai-go/internal/requestconfig" + "github.com/openai/openai-go/option" + "github.com/openai/openai-go/shared" + "github.com/tidwall/gjson" +) + +// BetaAssistantService contains methods and other services that help with +// interacting with the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewBetaAssistantService] method instead. +type BetaAssistantService struct { + Options []option.RequestOption +} + +// NewBetaAssistantService generates a new service that applies the given options +// to each request. These options are applied after the parent client's options (if +// there is one), and before any request-specific options. +func NewBetaAssistantService(opts ...option.RequestOption) (r *BetaAssistantService) { + r = &BetaAssistantService{} + r.Options = opts + return +} + +// Create an assistant with a model and instructions. +func (r *BetaAssistantService) New(ctx context.Context, body BetaAssistantNewParams, opts ...option.RequestOption) (res *Assistant, err error) { + opts = append(r.Options[:], opts...) + path := "assistants" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Retrieves an assistant. 
+func (r *BetaAssistantService) Get(ctx context.Context, assistantID string, opts ...option.RequestOption) (res *Assistant, err error) { + opts = append(r.Options[:], opts...) + if assistantID == "" { + err = errors.New("missing required assistant_id parameter") + return + } + path := fmt.Sprintf("assistants/%s", assistantID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +// Modifies an assistant. +func (r *BetaAssistantService) Update(ctx context.Context, assistantID string, body BetaAssistantUpdateParams, opts ...option.RequestOption) (res *Assistant, err error) { + opts = append(r.Options[:], opts...) + if assistantID == "" { + err = errors.New("missing required assistant_id parameter") + return + } + path := fmt.Sprintf("assistants/%s", assistantID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Returns a list of assistants. +func (r *BetaAssistantService) List(ctx context.Context, query BetaAssistantListParams, opts ...option.RequestOption) (res *pagination.CursorPage[Assistant], err error) { + var raw *http.Response + opts = append(r.Options[:], opts...) + opts = append([]option.RequestOption{option.WithResponseInto(&raw)}, opts...) + path := "assistants" + cfg, err := requestconfig.NewRequestConfig(ctx, http.MethodGet, path, query, &res, opts...) + if err != nil { + return nil, err + } + err = cfg.Execute() + if err != nil { + return nil, err + } + res.SetPageConfig(cfg, raw) + return res, nil +} + +// Returns a list of assistants. +func (r *BetaAssistantService) ListAutoPaging(ctx context.Context, query BetaAssistantListParams, opts ...option.RequestOption) *pagination.CursorPageAutoPager[Assistant] { + return pagination.NewCursorPageAutoPager(r.List(ctx, query, opts...)) +} + +// Delete an assistant. 
+func (r *BetaAssistantService) Delete(ctx context.Context, assistantID string, opts ...option.RequestOption) (res *AssistantDeleted, err error) {
+ opts = append(r.Options[:], opts...)
+ if assistantID == "" {
+ err = errors.New("missing required assistant_id parameter")
+ return
+ }
+ path := fmt.Sprintf("assistants/%s", assistantID)
+ err = requestconfig.ExecuteNewRequest(ctx, http.MethodDelete, path, nil, &res, opts...)
+ return
+}
+
+// Represents an `assistant` that can call the model and use tools.
+type Assistant struct {
+ // The identifier, which can be referenced in API endpoints.
+ ID string `json:"id,required"`
+ // The Unix timestamp (in seconds) for when the assistant was created.
+ CreatedAt int64 `json:"created_at,required"`
+ // The description of the assistant. The maximum length is 512 characters.
+ Description string `json:"description,required,nullable"`
+ // The system instructions that the assistant uses. The maximum length is 256,000
+ // characters.
+ Instructions string `json:"instructions,required,nullable"`
+ // Set of 16 key-value pairs that can be attached to an object. This can be useful
+ // for storing additional information about the object in a structured format. Keys
+ // can be a maximum of 64 characters long and values can be a maximum of 512
+ // characters long.
+ Metadata interface{} `json:"metadata,required,nullable"`
+ // ID of the model to use. You can use the
+ // [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+ // see all of your available models, or see our
+ // [Model overview](https://platform.openai.com/docs/models/overview) for
+ // descriptions of them.
+ Model string `json:"model,required"`
+ // The name of the assistant. The maximum length is 256 characters.
+ Name string `json:"name,required,nullable"`
+ // The object type, which is always `assistant`.
+ Object AssistantObject `json:"object,required"`
+ // A list of tools enabled on the assistant.
There can be a maximum of 128 tools per + // assistant. Tools can be of types `code_interpreter`, `file_search`, or + // `function`. + Tools []AssistantTool `json:"tools,required"` + // Specifies the format that the model must output. Compatible with + // [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + // [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + // and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + // + // Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + // message the model generates is valid JSON. + // + // **Important:** when using JSON mode, you **must** also instruct the model to + // produce JSON yourself via a system or user message. Without this, the model may + // generate an unending stream of whitespace until the generation reaches the token + // limit, resulting in a long-running and seemingly "stuck" request. Also note that + // the message content may be partially cut off if `finish_reason="length"`, which + // indicates the generation exceeded `max_tokens` or the conversation exceeded the + // max context length. + ResponseFormat AssistantResponseFormatOptionUnion `json:"response_format,nullable"` + // What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + // make the output more random, while lower values like 0.2 will make it more + // focused and deterministic. + Temperature float64 `json:"temperature,nullable"` + // A set of resources that are used by the assistant's tools. The resources are + // specific to the type of tool. For example, the `code_interpreter` tool requires + // a list of file IDs, while the `file_search` tool requires a list of vector store + // IDs. + ToolResources AssistantToolResources `json:"tool_resources,nullable"` + // An alternative to sampling with temperature, called nucleus sampling, where the + // model considers the results of the tokens with top_p probability mass. 
So 0.1 + // means only the tokens comprising the top 10% probability mass are considered. + // + // We generally recommend altering this or temperature but not both. + TopP float64 `json:"top_p,nullable"` + JSON assistantJSON `json:"-"` +} + +// assistantJSON contains the JSON metadata for the struct [Assistant] +type assistantJSON struct { + ID apijson.Field + CreatedAt apijson.Field + Description apijson.Field + Instructions apijson.Field + Metadata apijson.Field + Model apijson.Field + Name apijson.Field + Object apijson.Field + Tools apijson.Field + ResponseFormat apijson.Field + Temperature apijson.Field + ToolResources apijson.Field + TopP apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *Assistant) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r assistantJSON) RawJSON() string { + return r.raw +} + +// The object type, which is always `assistant`. +type AssistantObject string + +const ( + AssistantObjectAssistant AssistantObject = "assistant" +) + +func (r AssistantObject) IsKnown() bool { + switch r { + case AssistantObjectAssistant: + return true + } + return false +} + +// A set of resources that are used by the assistant's tools. The resources are +// specific to the type of tool. For example, the `code_interpreter` tool requires +// a list of file IDs, while the `file_search` tool requires a list of vector store +// IDs. 
type AssistantToolResources struct {
	CodeInterpreter AssistantToolResourcesCodeInterpreter `json:"code_interpreter"`
	FileSearch      AssistantToolResourcesFileSearch      `json:"file_search"`
	JSON            assistantToolResourcesJSON            `json:"-"`
}

// assistantToolResourcesJSON contains the JSON metadata for the struct
// [AssistantToolResources]
type assistantToolResourcesJSON struct {
	CodeInterpreter apijson.Field
	FileSearch      apijson.Field
	raw             string
	ExtraFields     map[string]apijson.Field
}

func (r *AssistantToolResources) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r assistantToolResourcesJSON) RawJSON() string {
	return r.raw
}

type AssistantToolResourcesCodeInterpreter struct {
	// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
	// available to the `code_interpreter` tool. There can be a maximum of 20 files
	// associated with the tool.
	FileIDs []string `json:"file_ids"`
	JSON    assistantToolResourcesCodeInterpreterJSON `json:"-"`
}

// assistantToolResourcesCodeInterpreterJSON contains the JSON metadata for the
// struct [AssistantToolResourcesCodeInterpreter]
type assistantToolResourcesCodeInterpreterJSON struct {
	FileIDs     apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *AssistantToolResourcesCodeInterpreter) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r assistantToolResourcesCodeInterpreterJSON) RawJSON() string {
	return r.raw
}

type AssistantToolResourcesFileSearch struct {
	// The ID of the
	// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
	// attached to this assistant. There can be a maximum of 1 vector store attached to
	// the assistant.
	VectorStoreIDs []string `json:"vector_store_ids"`
	JSON           assistantToolResourcesFileSearchJSON `json:"-"`
}

// assistantToolResourcesFileSearchJSON contains the JSON metadata for the struct
// [AssistantToolResourcesFileSearch]
type assistantToolResourcesFileSearchJSON struct {
	VectorStoreIDs apijson.Field
	raw            string
	ExtraFields    map[string]apijson.Field
}

func (r *AssistantToolResourcesFileSearch) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r assistantToolResourcesFileSearchJSON) RawJSON() string {
	return r.raw
}

// AssistantDeleted is the acknowledgement payload returned by Delete.
type AssistantDeleted struct {
	ID      string                 `json:"id,required"`
	Deleted bool                   `json:"deleted,required"`
	Object  AssistantDeletedObject `json:"object,required"`
	JSON    assistantDeletedJSON   `json:"-"`
}

// assistantDeletedJSON contains the JSON metadata for the struct
// [AssistantDeleted]
type assistantDeletedJSON struct {
	ID          apijson.Field
	Deleted     apijson.Field
	Object      apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *AssistantDeleted) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r assistantDeletedJSON) RawJSON() string {
	return r.raw
}

type AssistantDeletedObject string

const (
	AssistantDeletedObjectAssistantDeleted AssistantDeletedObject = "assistant.deleted"
)

// IsKnown reports whether r is one of the constants defined above.
func (r AssistantDeletedObject) IsKnown() bool {
	switch r {
	case AssistantDeletedObjectAssistantDeleted:
		return true
	}
	return false
}

// Represents an event emitted when streaming a Run.
//
// Each event in a server-sent events stream has an `event` and `data` property:
//
// ```
// event: thread.created
// data: {"id": "thread_123", "object": "thread", ...}
// ```
//
// We emit events whenever a new object is created, transitions to a new state, or
// is being streamed in parts (deltas).
// For example, we emit `thread.run.created`
// when a new run is created, `thread.run.completed` when a run completes, and so
// on. When an Assistant chooses to create a message during a run, we emit a
// `thread.message.created event`, a `thread.message.in_progress` event, many
// `thread.message.delta` events, and finally a `thread.message.completed` event.
//
// We may add additional events over time, so we recommend handling unknown events
// gracefully in your code. See the
// [Assistants API quickstart](https://platform.openai.com/docs/assistants/overview)
// to learn how to integrate the Assistants API with streaming.
type AssistantStreamEvent struct {
	Event AssistantStreamEventEvent `json:"event,required"`
	// This field can have the runtime type of [Thread], [Run], [RunStep],
	// [RunStepDeltaEvent], [Message], [MessageDeltaEvent], [shared.ErrorObject].
	Data interface{}              `json:"data"`
	JSON assistantStreamEventJSON `json:"-"`
	// union holds the concrete discriminator-matched variant, populated during
	// UnmarshalJSON and exposed to callers through AsUnion.
	union AssistantStreamEventUnion
}

// assistantStreamEventJSON contains the JSON metadata for the struct
// [AssistantStreamEvent]
type assistantStreamEventJSON struct {
	Event       apijson.Field
	Data        apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r assistantStreamEventJSON) RawJSON() string {
	return r.raw
}

func (r *AssistantStreamEvent) UnmarshalJSON(data []byte) (err error) {
	// Two-phase decode: reset the receiver, unmarshal into the union (which
	// selects a registered variant by the "event" discriminator), then port
	// the variant's fields back onto this wrapper struct.
	*r = AssistantStreamEvent{}
	err = apijson.UnmarshalRoot(data, &r.union)
	if err != nil {
		return err
	}
	return apijson.Port(r.union, &r)
}

// AsUnion returns a [AssistantStreamEventUnion] interface which you can cast to
// the specific types for more type safety.
//
// Possible runtime types of the union are [AssistantStreamEventThreadCreated],
// [AssistantStreamEventThreadRunCreated], [AssistantStreamEventThreadRunQueued],
// [AssistantStreamEventThreadRunInProgress],
// [AssistantStreamEventThreadRunRequiresAction],
// [AssistantStreamEventThreadRunCompleted],
// [AssistantStreamEventThreadRunIncomplete],
// [AssistantStreamEventThreadRunFailed],
// [AssistantStreamEventThreadRunCancelling],
// [AssistantStreamEventThreadRunCancelled],
// [AssistantStreamEventThreadRunExpired],
// [AssistantStreamEventThreadRunStepCreated],
// [AssistantStreamEventThreadRunStepInProgress],
// [AssistantStreamEventThreadRunStepDelta],
// [AssistantStreamEventThreadRunStepCompleted],
// [AssistantStreamEventThreadRunStepFailed],
// [AssistantStreamEventThreadRunStepCancelled],
// [AssistantStreamEventThreadRunStepExpired],
// [AssistantStreamEventThreadMessageCreated],
// [AssistantStreamEventThreadMessageInProgress],
// [AssistantStreamEventThreadMessageDelta],
// [AssistantStreamEventThreadMessageCompleted],
// [AssistantStreamEventThreadMessageIncomplete], [AssistantStreamEventErrorEvent].
func (r AssistantStreamEvent) AsUnion() AssistantStreamEventUnion {
	return r.union
}

// Represents an event emitted when streaming a Run.
//
// Each event in a server-sent events stream has an `event` and `data` property:
//
// ```
// event: thread.created
// data: {"id": "thread_123", "object": "thread", ...}
// ```
//
// We emit events whenever a new object is created, transitions to a new state, or
// is being streamed in parts (deltas). For example, we emit `thread.run.created`
// when a new run is created, `thread.run.completed` when a run completes, and so
// on. When an Assistant chooses to create a message during a run, we emit a
// `thread.message.created event`, a `thread.message.in_progress` event, many
// `thread.message.delta` events, and finally a `thread.message.completed` event.
//
// We may add additional events over time, so we recommend handling unknown events
// gracefully in your code. See the
// [Assistants API quickstart](https://platform.openai.com/docs/assistants/overview)
// to learn how to integrate the Assistants API with streaming.
//
// Union satisfied by [AssistantStreamEventThreadCreated],
// [AssistantStreamEventThreadRunCreated], [AssistantStreamEventThreadRunQueued],
// [AssistantStreamEventThreadRunInProgress],
// [AssistantStreamEventThreadRunRequiresAction],
// [AssistantStreamEventThreadRunCompleted],
// [AssistantStreamEventThreadRunIncomplete],
// [AssistantStreamEventThreadRunFailed],
// [AssistantStreamEventThreadRunCancelling],
// [AssistantStreamEventThreadRunCancelled],
// [AssistantStreamEventThreadRunExpired],
// [AssistantStreamEventThreadRunStepCreated],
// [AssistantStreamEventThreadRunStepInProgress],
// [AssistantStreamEventThreadRunStepDelta],
// [AssistantStreamEventThreadRunStepCompleted],
// [AssistantStreamEventThreadRunStepFailed],
// [AssistantStreamEventThreadRunStepCancelled],
// [AssistantStreamEventThreadRunStepExpired],
// [AssistantStreamEventThreadMessageCreated],
// [AssistantStreamEventThreadMessageInProgress],
// [AssistantStreamEventThreadMessageDelta],
// [AssistantStreamEventThreadMessageCompleted],
// [AssistantStreamEventThreadMessageIncomplete] or
// [AssistantStreamEventErrorEvent].
type AssistantStreamEventUnion interface {
	implementsAssistantStreamEvent()
}

// init registers every "event" discriminator value with its concrete variant
// type so apijson can decode AssistantStreamEventUnion from raw JSON.
func init() {
	apijson.RegisterUnion(
		reflect.TypeOf((*AssistantStreamEventUnion)(nil)).Elem(),
		"event",
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadCreated{}),
			DiscriminatorValue: "thread.created",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadRunCreated{}),
			DiscriminatorValue: "thread.run.created",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadRunQueued{}),
			DiscriminatorValue: "thread.run.queued",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadRunInProgress{}),
			DiscriminatorValue: "thread.run.in_progress",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadRunRequiresAction{}),
			DiscriminatorValue: "thread.run.requires_action",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadRunCompleted{}),
			DiscriminatorValue: "thread.run.completed",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadRunIncomplete{}),
			DiscriminatorValue: "thread.run.incomplete",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadRunFailed{}),
			DiscriminatorValue: "thread.run.failed",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadRunCancelling{}),
			DiscriminatorValue: "thread.run.cancelling",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadRunCancelled{}),
			DiscriminatorValue: "thread.run.cancelled",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadRunExpired{}),
			DiscriminatorValue: "thread.run.expired",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadRunStepCreated{}),
			DiscriminatorValue: "thread.run.step.created",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadRunStepInProgress{}),
			DiscriminatorValue: "thread.run.step.in_progress",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadRunStepDelta{}),
			DiscriminatorValue: "thread.run.step.delta",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadRunStepCompleted{}),
			DiscriminatorValue: "thread.run.step.completed",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadRunStepFailed{}),
			DiscriminatorValue: "thread.run.step.failed",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadRunStepCancelled{}),
			DiscriminatorValue: "thread.run.step.cancelled",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadRunStepExpired{}),
			DiscriminatorValue: "thread.run.step.expired",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadMessageCreated{}),
			DiscriminatorValue: "thread.message.created",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadMessageInProgress{}),
			DiscriminatorValue: "thread.message.in_progress",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadMessageDelta{}),
			DiscriminatorValue: "thread.message.delta",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadMessageCompleted{}),
			DiscriminatorValue: "thread.message.completed",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventThreadMessageIncomplete{}),
			DiscriminatorValue: "thread.message.incomplete",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(AssistantStreamEventErrorEvent{}),
			DiscriminatorValue: "error",
		},
	)
}

// Occurs when a new
// [thread](https://platform.openai.com/docs/api-reference/threads/object) is
// created.
type AssistantStreamEventThreadCreated struct {
	// Represents a thread that contains
	// [messages](https://platform.openai.com/docs/api-reference/messages).
	Data  Thread                                 `json:"data,required"`
	Event AssistantStreamEventThreadCreatedEvent `json:"event,required"`
	JSON  assistantStreamEventThreadCreatedJSON  `json:"-"`
}

// assistantStreamEventThreadCreatedJSON contains the JSON metadata for the struct
// [AssistantStreamEventThreadCreated]
type assistantStreamEventThreadCreatedJSON struct {
	Data        apijson.Field
	Event       apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *AssistantStreamEventThreadCreated) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r assistantStreamEventThreadCreatedJSON) RawJSON() string {
	return r.raw
}

// implementsAssistantStreamEvent marks this variant as a member of
// AssistantStreamEventUnion.
func (r AssistantStreamEventThreadCreated) implementsAssistantStreamEvent() {}

type AssistantStreamEventThreadCreatedEvent string

const (
	AssistantStreamEventThreadCreatedEventThreadCreated AssistantStreamEventThreadCreatedEvent = "thread.created"
)

// IsKnown reports whether r is one of the constants defined above.
func (r AssistantStreamEventThreadCreatedEvent) IsKnown() bool {
	switch r {
	case AssistantStreamEventThreadCreatedEventThreadCreated:
		return true
	}
	return false
}

// Occurs when a new
// [run](https://platform.openai.com/docs/api-reference/runs/object) is created.
type AssistantStreamEventThreadRunCreated struct {
	// Represents an execution run on a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Run                                       `json:"data,required"`
	Event AssistantStreamEventThreadRunCreatedEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunCreatedJSON  `json:"-"`
}

// NOTE: the variant types below all follow the same generated pattern:
// a {Data, Event, JSON} payload struct, a lowercase *JSON metadata mirror,
// UnmarshalJSON/RawJSON plumbing, the union marker method, and a
// single-constant Event enum with IsKnown.

// assistantStreamEventThreadRunCreatedJSON contains the JSON metadata for the
// struct [AssistantStreamEventThreadRunCreated]
type assistantStreamEventThreadRunCreatedJSON struct {
	Data        apijson.Field
	Event       apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *AssistantStreamEventThreadRunCreated) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r assistantStreamEventThreadRunCreatedJSON) RawJSON() string {
	return r.raw
}

func (r AssistantStreamEventThreadRunCreated) implementsAssistantStreamEvent() {}

type AssistantStreamEventThreadRunCreatedEvent string

const (
	AssistantStreamEventThreadRunCreatedEventThreadRunCreated AssistantStreamEventThreadRunCreatedEvent = "thread.run.created"
)

func (r AssistantStreamEventThreadRunCreatedEvent) IsKnown() bool {
	switch r {
	case AssistantStreamEventThreadRunCreatedEventThreadRunCreated:
		return true
	}
	return false
}

// Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
// moves to a `queued` status.
type AssistantStreamEventThreadRunQueued struct {
	// Represents an execution run on a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Run                                      `json:"data,required"`
	Event AssistantStreamEventThreadRunQueuedEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunQueuedJSON  `json:"-"`
}

// assistantStreamEventThreadRunQueuedJSON contains the JSON metadata for the
// struct [AssistantStreamEventThreadRunQueued]
type assistantStreamEventThreadRunQueuedJSON struct {
	Data        apijson.Field
	Event       apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *AssistantStreamEventThreadRunQueued) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r assistantStreamEventThreadRunQueuedJSON) RawJSON() string {
	return r.raw
}

func (r AssistantStreamEventThreadRunQueued) implementsAssistantStreamEvent() {}

type AssistantStreamEventThreadRunQueuedEvent string

const (
	AssistantStreamEventThreadRunQueuedEventThreadRunQueued AssistantStreamEventThreadRunQueuedEvent = "thread.run.queued"
)

func (r AssistantStreamEventThreadRunQueuedEvent) IsKnown() bool {
	switch r {
	case AssistantStreamEventThreadRunQueuedEventThreadRunQueued:
		return true
	}
	return false
}

// Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
// moves to an `in_progress` status.
type AssistantStreamEventThreadRunInProgress struct {
	// Represents an execution run on a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Run                                          `json:"data,required"`
	Event AssistantStreamEventThreadRunInProgressEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunInProgressJSON  `json:"-"`
}

// assistantStreamEventThreadRunInProgressJSON contains the JSON metadata for the
// struct [AssistantStreamEventThreadRunInProgress]
type assistantStreamEventThreadRunInProgressJSON struct {
	Data        apijson.Field
	Event       apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *AssistantStreamEventThreadRunInProgress) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r assistantStreamEventThreadRunInProgressJSON) RawJSON() string {
	return r.raw
}

func (r AssistantStreamEventThreadRunInProgress) implementsAssistantStreamEvent() {}

type AssistantStreamEventThreadRunInProgressEvent string

const (
	AssistantStreamEventThreadRunInProgressEventThreadRunInProgress AssistantStreamEventThreadRunInProgressEvent = "thread.run.in_progress"
)

func (r AssistantStreamEventThreadRunInProgressEvent) IsKnown() bool {
	switch r {
	case AssistantStreamEventThreadRunInProgressEventThreadRunInProgress:
		return true
	}
	return false
}

// Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
// moves to a `requires_action` status.
type AssistantStreamEventThreadRunRequiresAction struct {
	// Represents an execution run on a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Run                                              `json:"data,required"`
	Event AssistantStreamEventThreadRunRequiresActionEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunRequiresActionJSON  `json:"-"`
}

// assistantStreamEventThreadRunRequiresActionJSON contains the JSON metadata for
// the struct [AssistantStreamEventThreadRunRequiresAction]
type assistantStreamEventThreadRunRequiresActionJSON struct {
	Data        apijson.Field
	Event       apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *AssistantStreamEventThreadRunRequiresAction) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r assistantStreamEventThreadRunRequiresActionJSON) RawJSON() string {
	return r.raw
}

func (r AssistantStreamEventThreadRunRequiresAction) implementsAssistantStreamEvent() {}

type AssistantStreamEventThreadRunRequiresActionEvent string

const (
	AssistantStreamEventThreadRunRequiresActionEventThreadRunRequiresAction AssistantStreamEventThreadRunRequiresActionEvent = "thread.run.requires_action"
)

func (r AssistantStreamEventThreadRunRequiresActionEvent) IsKnown() bool {
	switch r {
	case AssistantStreamEventThreadRunRequiresActionEventThreadRunRequiresAction:
		return true
	}
	return false
}

// Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
// is completed.
type AssistantStreamEventThreadRunCompleted struct {
	// Represents an execution run on a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Run                                         `json:"data,required"`
	Event AssistantStreamEventThreadRunCompletedEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunCompletedJSON  `json:"-"`
}

// assistantStreamEventThreadRunCompletedJSON contains the JSON metadata for the
// struct [AssistantStreamEventThreadRunCompleted]
type assistantStreamEventThreadRunCompletedJSON struct {
	Data        apijson.Field
	Event       apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *AssistantStreamEventThreadRunCompleted) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r assistantStreamEventThreadRunCompletedJSON) RawJSON() string {
	return r.raw
}

func (r AssistantStreamEventThreadRunCompleted) implementsAssistantStreamEvent() {}

type AssistantStreamEventThreadRunCompletedEvent string

const (
	AssistantStreamEventThreadRunCompletedEventThreadRunCompleted AssistantStreamEventThreadRunCompletedEvent = "thread.run.completed"
)

func (r AssistantStreamEventThreadRunCompletedEvent) IsKnown() bool {
	switch r {
	case AssistantStreamEventThreadRunCompletedEventThreadRunCompleted:
		return true
	}
	return false
}

// Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
// ends with status `incomplete`.
type AssistantStreamEventThreadRunIncomplete struct {
	// Represents an execution run on a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Run                                          `json:"data,required"`
	Event AssistantStreamEventThreadRunIncompleteEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunIncompleteJSON  `json:"-"`
}

// assistantStreamEventThreadRunIncompleteJSON contains the JSON metadata for the
// struct [AssistantStreamEventThreadRunIncomplete]
type assistantStreamEventThreadRunIncompleteJSON struct {
	Data        apijson.Field
	Event       apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *AssistantStreamEventThreadRunIncomplete) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r assistantStreamEventThreadRunIncompleteJSON) RawJSON() string {
	return r.raw
}

func (r AssistantStreamEventThreadRunIncomplete) implementsAssistantStreamEvent() {}

type AssistantStreamEventThreadRunIncompleteEvent string

const (
	AssistantStreamEventThreadRunIncompleteEventThreadRunIncomplete AssistantStreamEventThreadRunIncompleteEvent = "thread.run.incomplete"
)

func (r AssistantStreamEventThreadRunIncompleteEvent) IsKnown() bool {
	switch r {
	case AssistantStreamEventThreadRunIncompleteEventThreadRunIncomplete:
		return true
	}
	return false
}

// Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
// fails.
type AssistantStreamEventThreadRunFailed struct {
	// Represents an execution run on a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Run                                      `json:"data,required"`
	Event AssistantStreamEventThreadRunFailedEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunFailedJSON  `json:"-"`
}

// assistantStreamEventThreadRunFailedJSON contains the JSON metadata for the
// struct [AssistantStreamEventThreadRunFailed]
type assistantStreamEventThreadRunFailedJSON struct {
	Data        apijson.Field
	Event       apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *AssistantStreamEventThreadRunFailed) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r assistantStreamEventThreadRunFailedJSON) RawJSON() string {
	return r.raw
}

func (r AssistantStreamEventThreadRunFailed) implementsAssistantStreamEvent() {}

type AssistantStreamEventThreadRunFailedEvent string

const (
	AssistantStreamEventThreadRunFailedEventThreadRunFailed AssistantStreamEventThreadRunFailedEvent = "thread.run.failed"
)

func (r AssistantStreamEventThreadRunFailedEvent) IsKnown() bool {
	switch r {
	case AssistantStreamEventThreadRunFailedEventThreadRunFailed:
		return true
	}
	return false
}

// Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
// moves to a `cancelling` status.
type AssistantStreamEventThreadRunCancelling struct {
	// Represents an execution run on a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Run                                          `json:"data,required"`
	Event AssistantStreamEventThreadRunCancellingEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunCancellingJSON  `json:"-"`
}

// assistantStreamEventThreadRunCancellingJSON contains the JSON metadata for the
// struct [AssistantStreamEventThreadRunCancelling]
type assistantStreamEventThreadRunCancellingJSON struct {
	Data        apijson.Field
	Event       apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *AssistantStreamEventThreadRunCancelling) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r assistantStreamEventThreadRunCancellingJSON) RawJSON() string {
	return r.raw
}

func (r AssistantStreamEventThreadRunCancelling) implementsAssistantStreamEvent() {}

type AssistantStreamEventThreadRunCancellingEvent string

const (
	AssistantStreamEventThreadRunCancellingEventThreadRunCancelling AssistantStreamEventThreadRunCancellingEvent = "thread.run.cancelling"
)

func (r AssistantStreamEventThreadRunCancellingEvent) IsKnown() bool {
	switch r {
	case AssistantStreamEventThreadRunCancellingEventThreadRunCancelling:
		return true
	}
	return false
}

// Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
// is cancelled.
type AssistantStreamEventThreadRunCancelled struct {
	// Represents an execution run on a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Run                                         `json:"data,required"`
	Event AssistantStreamEventThreadRunCancelledEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunCancelledJSON  `json:"-"`
}

// assistantStreamEventThreadRunCancelledJSON contains the JSON metadata for the
// struct [AssistantStreamEventThreadRunCancelled]
type assistantStreamEventThreadRunCancelledJSON struct {
	Data        apijson.Field
	Event       apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *AssistantStreamEventThreadRunCancelled) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r assistantStreamEventThreadRunCancelledJSON) RawJSON() string {
	return r.raw
}

func (r AssistantStreamEventThreadRunCancelled) implementsAssistantStreamEvent() {}

type AssistantStreamEventThreadRunCancelledEvent string

const (
	AssistantStreamEventThreadRunCancelledEventThreadRunCancelled AssistantStreamEventThreadRunCancelledEvent = "thread.run.cancelled"
)

func (r AssistantStreamEventThreadRunCancelledEvent) IsKnown() bool {
	switch r {
	case AssistantStreamEventThreadRunCancelledEventThreadRunCancelled:
		return true
	}
	return false
}

// Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
// expires.
type AssistantStreamEventThreadRunExpired struct {
	// Represents an execution run on a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Run                                       `json:"data,required"`
	Event AssistantStreamEventThreadRunExpiredEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunExpiredJSON  `json:"-"`
}

// assistantStreamEventThreadRunExpiredJSON contains the JSON metadata for the
// struct [AssistantStreamEventThreadRunExpired]
type assistantStreamEventThreadRunExpiredJSON struct {
	Data        apijson.Field
	Event       apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *AssistantStreamEventThreadRunExpired) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r assistantStreamEventThreadRunExpiredJSON) RawJSON() string {
	return r.raw
}

func (r AssistantStreamEventThreadRunExpired) implementsAssistantStreamEvent() {}

type AssistantStreamEventThreadRunExpiredEvent string

const (
	AssistantStreamEventThreadRunExpiredEventThreadRunExpired AssistantStreamEventThreadRunExpiredEvent = "thread.run.expired"
)

func (r AssistantStreamEventThreadRunExpiredEvent) IsKnown() bool {
	switch r {
	case AssistantStreamEventThreadRunExpiredEventThreadRunExpired:
		return true
	}
	return false
}

// Occurs when a
// [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is
// created.
type AssistantStreamEventThreadRunStepCreated struct {
	// Represents a step in execution of a run.
	Data  RunStep                                       `json:"data,required"`
	Event AssistantStreamEventThreadRunStepCreatedEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunStepCreatedJSON  `json:"-"`
}

// assistantStreamEventThreadRunStepCreatedJSON contains the JSON metadata for the
// struct [AssistantStreamEventThreadRunStepCreated]
type assistantStreamEventThreadRunStepCreatedJSON struct {
	Data        apijson.Field
	Event       apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *AssistantStreamEventThreadRunStepCreated) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r assistantStreamEventThreadRunStepCreatedJSON) RawJSON() string {
	return r.raw
}

func (r AssistantStreamEventThreadRunStepCreated) implementsAssistantStreamEvent() {}

type AssistantStreamEventThreadRunStepCreatedEvent string

const (
	AssistantStreamEventThreadRunStepCreatedEventThreadRunStepCreated AssistantStreamEventThreadRunStepCreatedEvent = "thread.run.step.created"
)

func (r AssistantStreamEventThreadRunStepCreatedEvent) IsKnown() bool {
	switch r {
	case AssistantStreamEventThreadRunStepCreatedEventThreadRunStepCreated:
		return true
	}
	return false
}

// Occurs when a
// [run step](https://platform.openai.com/docs/api-reference/runs/step-object)
// moves to an `in_progress` state.
type AssistantStreamEventThreadRunStepInProgress struct {
	// Represents a step in execution of a run.
	Data  RunStep                                          `json:"data,required"`
	Event AssistantStreamEventThreadRunStepInProgressEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunStepInProgressJSON  `json:"-"`
}

// assistantStreamEventThreadRunStepInProgressJSON contains the JSON metadata for
// the struct [AssistantStreamEventThreadRunStepInProgress]
type assistantStreamEventThreadRunStepInProgressJSON struct {
	Data        apijson.Field
	Event       apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *AssistantStreamEventThreadRunStepInProgress) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r assistantStreamEventThreadRunStepInProgressJSON) RawJSON() string {
	return r.raw
}

func (r AssistantStreamEventThreadRunStepInProgress) implementsAssistantStreamEvent() {}

type AssistantStreamEventThreadRunStepInProgressEvent string

const (
	AssistantStreamEventThreadRunStepInProgressEventThreadRunStepInProgress AssistantStreamEventThreadRunStepInProgressEvent = "thread.run.step.in_progress"
)

func (r AssistantStreamEventThreadRunStepInProgressEvent) IsKnown() bool {
	switch r {
	case AssistantStreamEventThreadRunStepInProgressEventThreadRunStepInProgress:
		return true
	}
	return false
}

// Occurs when parts of a
// [run step](https://platform.openai.com/docs/api-reference/runs/step-object) are
// being streamed.
type AssistantStreamEventThreadRunStepDelta struct {
	// Represents a run step delta i.e. any changed fields on a run step during
	// streaming.
+ Data RunStepDeltaEvent `json:"data,required"` + Event AssistantStreamEventThreadRunStepDeltaEvent `json:"event,required"` + JSON assistantStreamEventThreadRunStepDeltaJSON `json:"-"` +} + +// assistantStreamEventThreadRunStepDeltaJSON contains the JSON metadata for the +// struct [AssistantStreamEventThreadRunStepDelta] +type assistantStreamEventThreadRunStepDeltaJSON struct { + Data apijson.Field + Event apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *AssistantStreamEventThreadRunStepDelta) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r assistantStreamEventThreadRunStepDeltaJSON) RawJSON() string { + return r.raw +} + +func (r AssistantStreamEventThreadRunStepDelta) implementsAssistantStreamEvent() {} + +type AssistantStreamEventThreadRunStepDeltaEvent string + +const ( + AssistantStreamEventThreadRunStepDeltaEventThreadRunStepDelta AssistantStreamEventThreadRunStepDeltaEvent = "thread.run.step.delta" +) + +func (r AssistantStreamEventThreadRunStepDeltaEvent) IsKnown() bool { + switch r { + case AssistantStreamEventThreadRunStepDeltaEventThreadRunStepDelta: + return true + } + return false +} + +// Occurs when a +// [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is +// completed. +type AssistantStreamEventThreadRunStepCompleted struct { + // Represents a step in execution of a run. 
+ Data RunStep `json:"data,required"` + Event AssistantStreamEventThreadRunStepCompletedEvent `json:"event,required"` + JSON assistantStreamEventThreadRunStepCompletedJSON `json:"-"` +} + +// assistantStreamEventThreadRunStepCompletedJSON contains the JSON metadata for +// the struct [AssistantStreamEventThreadRunStepCompleted] +type assistantStreamEventThreadRunStepCompletedJSON struct { + Data apijson.Field + Event apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *AssistantStreamEventThreadRunStepCompleted) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r assistantStreamEventThreadRunStepCompletedJSON) RawJSON() string { + return r.raw +} + +func (r AssistantStreamEventThreadRunStepCompleted) implementsAssistantStreamEvent() {} + +type AssistantStreamEventThreadRunStepCompletedEvent string + +const ( + AssistantStreamEventThreadRunStepCompletedEventThreadRunStepCompleted AssistantStreamEventThreadRunStepCompletedEvent = "thread.run.step.completed" +) + +func (r AssistantStreamEventThreadRunStepCompletedEvent) IsKnown() bool { + switch r { + case AssistantStreamEventThreadRunStepCompletedEventThreadRunStepCompleted: + return true + } + return false +} + +// Occurs when a +// [run step](https://platform.openai.com/docs/api-reference/runs/step-object) +// fails. +type AssistantStreamEventThreadRunStepFailed struct { + // Represents a step in execution of a run. 
+ Data RunStep `json:"data,required"` + Event AssistantStreamEventThreadRunStepFailedEvent `json:"event,required"` + JSON assistantStreamEventThreadRunStepFailedJSON `json:"-"` +} + +// assistantStreamEventThreadRunStepFailedJSON contains the JSON metadata for the +// struct [AssistantStreamEventThreadRunStepFailed] +type assistantStreamEventThreadRunStepFailedJSON struct { + Data apijson.Field + Event apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *AssistantStreamEventThreadRunStepFailed) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r assistantStreamEventThreadRunStepFailedJSON) RawJSON() string { + return r.raw +} + +func (r AssistantStreamEventThreadRunStepFailed) implementsAssistantStreamEvent() {} + +type AssistantStreamEventThreadRunStepFailedEvent string + +const ( + AssistantStreamEventThreadRunStepFailedEventThreadRunStepFailed AssistantStreamEventThreadRunStepFailedEvent = "thread.run.step.failed" +) + +func (r AssistantStreamEventThreadRunStepFailedEvent) IsKnown() bool { + switch r { + case AssistantStreamEventThreadRunStepFailedEventThreadRunStepFailed: + return true + } + return false +} + +// Occurs when a +// [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is +// cancelled. +type AssistantStreamEventThreadRunStepCancelled struct { + // Represents a step in execution of a run. 
+ Data RunStep `json:"data,required"` + Event AssistantStreamEventThreadRunStepCancelledEvent `json:"event,required"` + JSON assistantStreamEventThreadRunStepCancelledJSON `json:"-"` +} + +// assistantStreamEventThreadRunStepCancelledJSON contains the JSON metadata for +// the struct [AssistantStreamEventThreadRunStepCancelled] +type assistantStreamEventThreadRunStepCancelledJSON struct { + Data apijson.Field + Event apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *AssistantStreamEventThreadRunStepCancelled) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r assistantStreamEventThreadRunStepCancelledJSON) RawJSON() string { + return r.raw +} + +func (r AssistantStreamEventThreadRunStepCancelled) implementsAssistantStreamEvent() {} + +type AssistantStreamEventThreadRunStepCancelledEvent string + +const ( + AssistantStreamEventThreadRunStepCancelledEventThreadRunStepCancelled AssistantStreamEventThreadRunStepCancelledEvent = "thread.run.step.cancelled" +) + +func (r AssistantStreamEventThreadRunStepCancelledEvent) IsKnown() bool { + switch r { + case AssistantStreamEventThreadRunStepCancelledEventThreadRunStepCancelled: + return true + } + return false +} + +// Occurs when a +// [run step](https://platform.openai.com/docs/api-reference/runs/step-object) +// expires. +type AssistantStreamEventThreadRunStepExpired struct { + // Represents a step in execution of a run. 
+ Data RunStep `json:"data,required"` + Event AssistantStreamEventThreadRunStepExpiredEvent `json:"event,required"` + JSON assistantStreamEventThreadRunStepExpiredJSON `json:"-"` +} + +// assistantStreamEventThreadRunStepExpiredJSON contains the JSON metadata for the +// struct [AssistantStreamEventThreadRunStepExpired] +type assistantStreamEventThreadRunStepExpiredJSON struct { + Data apijson.Field + Event apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *AssistantStreamEventThreadRunStepExpired) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r assistantStreamEventThreadRunStepExpiredJSON) RawJSON() string { + return r.raw +} + +func (r AssistantStreamEventThreadRunStepExpired) implementsAssistantStreamEvent() {} + +type AssistantStreamEventThreadRunStepExpiredEvent string + +const ( + AssistantStreamEventThreadRunStepExpiredEventThreadRunStepExpired AssistantStreamEventThreadRunStepExpiredEvent = "thread.run.step.expired" +) + +func (r AssistantStreamEventThreadRunStepExpiredEvent) IsKnown() bool { + switch r { + case AssistantStreamEventThreadRunStepExpiredEventThreadRunStepExpired: + return true + } + return false +} + +// Occurs when a +// [message](https://platform.openai.com/docs/api-reference/messages/object) is +// created. +type AssistantStreamEventThreadMessageCreated struct { + // Represents a message within a + // [thread](https://platform.openai.com/docs/api-reference/threads). 
+ Data Message `json:"data,required"` + Event AssistantStreamEventThreadMessageCreatedEvent `json:"event,required"` + JSON assistantStreamEventThreadMessageCreatedJSON `json:"-"` +} + +// assistantStreamEventThreadMessageCreatedJSON contains the JSON metadata for the +// struct [AssistantStreamEventThreadMessageCreated] +type assistantStreamEventThreadMessageCreatedJSON struct { + Data apijson.Field + Event apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *AssistantStreamEventThreadMessageCreated) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r assistantStreamEventThreadMessageCreatedJSON) RawJSON() string { + return r.raw +} + +func (r AssistantStreamEventThreadMessageCreated) implementsAssistantStreamEvent() {} + +type AssistantStreamEventThreadMessageCreatedEvent string + +const ( + AssistantStreamEventThreadMessageCreatedEventThreadMessageCreated AssistantStreamEventThreadMessageCreatedEvent = "thread.message.created" +) + +func (r AssistantStreamEventThreadMessageCreatedEvent) IsKnown() bool { + switch r { + case AssistantStreamEventThreadMessageCreatedEventThreadMessageCreated: + return true + } + return false +} + +// Occurs when a +// [message](https://platform.openai.com/docs/api-reference/messages/object) moves +// to an `in_progress` state. +type AssistantStreamEventThreadMessageInProgress struct { + // Represents a message within a + // [thread](https://platform.openai.com/docs/api-reference/threads). 
+ Data Message `json:"data,required"` + Event AssistantStreamEventThreadMessageInProgressEvent `json:"event,required"` + JSON assistantStreamEventThreadMessageInProgressJSON `json:"-"` +} + +// assistantStreamEventThreadMessageInProgressJSON contains the JSON metadata for +// the struct [AssistantStreamEventThreadMessageInProgress] +type assistantStreamEventThreadMessageInProgressJSON struct { + Data apijson.Field + Event apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *AssistantStreamEventThreadMessageInProgress) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r assistantStreamEventThreadMessageInProgressJSON) RawJSON() string { + return r.raw +} + +func (r AssistantStreamEventThreadMessageInProgress) implementsAssistantStreamEvent() {} + +type AssistantStreamEventThreadMessageInProgressEvent string + +const ( + AssistantStreamEventThreadMessageInProgressEventThreadMessageInProgress AssistantStreamEventThreadMessageInProgressEvent = "thread.message.in_progress" +) + +func (r AssistantStreamEventThreadMessageInProgressEvent) IsKnown() bool { + switch r { + case AssistantStreamEventThreadMessageInProgressEventThreadMessageInProgress: + return true + } + return false +} + +// Occurs when parts of a +// [Message](https://platform.openai.com/docs/api-reference/messages/object) are +// being streamed. +type AssistantStreamEventThreadMessageDelta struct { + // Represents a message delta i.e. any changed fields on a message during + // streaming. 
+ Data MessageDeltaEvent `json:"data,required"` + Event AssistantStreamEventThreadMessageDeltaEvent `json:"event,required"` + JSON assistantStreamEventThreadMessageDeltaJSON `json:"-"` +} + +// assistantStreamEventThreadMessageDeltaJSON contains the JSON metadata for the +// struct [AssistantStreamEventThreadMessageDelta] +type assistantStreamEventThreadMessageDeltaJSON struct { + Data apijson.Field + Event apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *AssistantStreamEventThreadMessageDelta) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r assistantStreamEventThreadMessageDeltaJSON) RawJSON() string { + return r.raw +} + +func (r AssistantStreamEventThreadMessageDelta) implementsAssistantStreamEvent() {} + +type AssistantStreamEventThreadMessageDeltaEvent string + +const ( + AssistantStreamEventThreadMessageDeltaEventThreadMessageDelta AssistantStreamEventThreadMessageDeltaEvent = "thread.message.delta" +) + +func (r AssistantStreamEventThreadMessageDeltaEvent) IsKnown() bool { + switch r { + case AssistantStreamEventThreadMessageDeltaEventThreadMessageDelta: + return true + } + return false +} + +// Occurs when a +// [message](https://platform.openai.com/docs/api-reference/messages/object) is +// completed. +type AssistantStreamEventThreadMessageCompleted struct { + // Represents a message within a + // [thread](https://platform.openai.com/docs/api-reference/threads). 
+ Data Message `json:"data,required"` + Event AssistantStreamEventThreadMessageCompletedEvent `json:"event,required"` + JSON assistantStreamEventThreadMessageCompletedJSON `json:"-"` +} + +// assistantStreamEventThreadMessageCompletedJSON contains the JSON metadata for +// the struct [AssistantStreamEventThreadMessageCompleted] +type assistantStreamEventThreadMessageCompletedJSON struct { + Data apijson.Field + Event apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *AssistantStreamEventThreadMessageCompleted) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r assistantStreamEventThreadMessageCompletedJSON) RawJSON() string { + return r.raw +} + +func (r AssistantStreamEventThreadMessageCompleted) implementsAssistantStreamEvent() {} + +type AssistantStreamEventThreadMessageCompletedEvent string + +const ( + AssistantStreamEventThreadMessageCompletedEventThreadMessageCompleted AssistantStreamEventThreadMessageCompletedEvent = "thread.message.completed" +) + +func (r AssistantStreamEventThreadMessageCompletedEvent) IsKnown() bool { + switch r { + case AssistantStreamEventThreadMessageCompletedEventThreadMessageCompleted: + return true + } + return false +} + +// Occurs when a +// [message](https://platform.openai.com/docs/api-reference/messages/object) ends +// before it is completed. +type AssistantStreamEventThreadMessageIncomplete struct { + // Represents a message within a + // [thread](https://platform.openai.com/docs/api-reference/threads). 
+ Data Message `json:"data,required"` + Event AssistantStreamEventThreadMessageIncompleteEvent `json:"event,required"` + JSON assistantStreamEventThreadMessageIncompleteJSON `json:"-"` +} + +// assistantStreamEventThreadMessageIncompleteJSON contains the JSON metadata for +// the struct [AssistantStreamEventThreadMessageIncomplete] +type assistantStreamEventThreadMessageIncompleteJSON struct { + Data apijson.Field + Event apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *AssistantStreamEventThreadMessageIncomplete) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r assistantStreamEventThreadMessageIncompleteJSON) RawJSON() string { + return r.raw +} + +func (r AssistantStreamEventThreadMessageIncomplete) implementsAssistantStreamEvent() {} + +type AssistantStreamEventThreadMessageIncompleteEvent string + +const ( + AssistantStreamEventThreadMessageIncompleteEventThreadMessageIncomplete AssistantStreamEventThreadMessageIncompleteEvent = "thread.message.incomplete" +) + +func (r AssistantStreamEventThreadMessageIncompleteEvent) IsKnown() bool { + switch r { + case AssistantStreamEventThreadMessageIncompleteEventThreadMessageIncomplete: + return true + } + return false +} + +// Occurs when an +// [error](https://platform.openai.com/docs/guides/error-codes/api-errors) occurs. +// This can happen due to an internal server error or a timeout. 
type AssistantStreamEventErrorEvent struct {
	// The error details; see [shared.ErrorObject].
	Data  shared.ErrorObject                  `json:"data,required"`
	Event AssistantStreamEventErrorEventEvent `json:"event,required"`
	// Metadata for the raw JSON this struct was decoded from.
	JSON assistantStreamEventErrorEventJSON `json:"-"`
}

// assistantStreamEventErrorEventJSON contains the JSON metadata for the struct
// [AssistantStreamEventErrorEvent]
type assistantStreamEventErrorEventJSON struct {
	Data        apijson.Field
	Event       apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

// UnmarshalJSON decodes data into r via apijson.UnmarshalRoot.
func (r *AssistantStreamEventErrorEvent) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the stored raw JSON string.
func (r assistantStreamEventErrorEventJSON) RawJSON() string {
	return r.raw
}

// implementsAssistantStreamEvent marks this type as a variant of the
// AssistantStreamEvent union.
func (r AssistantStreamEventErrorEvent) implementsAssistantStreamEvent() {}

// AssistantStreamEventErrorEventEvent is the event-type discriminator for
// [AssistantStreamEventErrorEvent]; its only known value is "error".
type AssistantStreamEventErrorEventEvent string

const (
	AssistantStreamEventErrorEventEventError AssistantStreamEventErrorEventEvent = "error"
)

// IsKnown reports whether r is one of the enumerated values above.
func (r AssistantStreamEventErrorEventEvent) IsKnown() bool {
	switch r {
	case AssistantStreamEventErrorEventEventError:
		return true
	}
	return false
}

// AssistantStreamEventEvent enumerates every event name that can appear on an
// assistant event stream, across all variant payload types.
type AssistantStreamEventEvent string

const (
	AssistantStreamEventEventThreadCreated           AssistantStreamEventEvent = "thread.created"
	AssistantStreamEventEventThreadRunCreated        AssistantStreamEventEvent = "thread.run.created"
	AssistantStreamEventEventThreadRunQueued         AssistantStreamEventEvent = "thread.run.queued"
	AssistantStreamEventEventThreadRunInProgress     AssistantStreamEventEvent = "thread.run.in_progress"
	AssistantStreamEventEventThreadRunRequiresAction AssistantStreamEventEvent = "thread.run.requires_action"
	AssistantStreamEventEventThreadRunCompleted      AssistantStreamEventEvent = "thread.run.completed"
	AssistantStreamEventEventThreadRunIncomplete     AssistantStreamEventEvent = "thread.run.incomplete"
	AssistantStreamEventEventThreadRunFailed         AssistantStreamEventEvent = "thread.run.failed"
	AssistantStreamEventEventThreadRunCancelling     AssistantStreamEventEvent = "thread.run.cancelling"
	AssistantStreamEventEventThreadRunCancelled      AssistantStreamEventEvent = "thread.run.cancelled"
	AssistantStreamEventEventThreadRunExpired        AssistantStreamEventEvent = "thread.run.expired"
	AssistantStreamEventEventThreadRunStepCreated    AssistantStreamEventEvent = "thread.run.step.created"
	AssistantStreamEventEventThreadRunStepInProgress AssistantStreamEventEvent = "thread.run.step.in_progress"
	AssistantStreamEventEventThreadRunStepDelta      AssistantStreamEventEvent = "thread.run.step.delta"
	AssistantStreamEventEventThreadRunStepCompleted  AssistantStreamEventEvent = "thread.run.step.completed"
	AssistantStreamEventEventThreadRunStepFailed     AssistantStreamEventEvent = "thread.run.step.failed"
	AssistantStreamEventEventThreadRunStepCancelled  AssistantStreamEventEvent = "thread.run.step.cancelled"
	AssistantStreamEventEventThreadRunStepExpired    AssistantStreamEventEvent = "thread.run.step.expired"
	AssistantStreamEventEventThreadMessageCreated    AssistantStreamEventEvent = "thread.message.created"
	AssistantStreamEventEventThreadMessageInProgress AssistantStreamEventEvent = "thread.message.in_progress"
	AssistantStreamEventEventThreadMessageDelta      AssistantStreamEventEvent = "thread.message.delta"
	AssistantStreamEventEventThreadMessageCompleted  AssistantStreamEventEvent = "thread.message.completed"
	AssistantStreamEventEventThreadMessageIncomplete AssistantStreamEventEvent = "thread.message.incomplete"
	AssistantStreamEventEventError                   AssistantStreamEventEvent = "error"
)

// IsKnown reports whether r is one of the enumerated values above.
func (r AssistantStreamEventEvent) IsKnown() bool {
	switch r {
	case AssistantStreamEventEventThreadCreated, AssistantStreamEventEventThreadRunCreated, AssistantStreamEventEventThreadRunQueued, AssistantStreamEventEventThreadRunInProgress, AssistantStreamEventEventThreadRunRequiresAction, AssistantStreamEventEventThreadRunCompleted, AssistantStreamEventEventThreadRunIncomplete, AssistantStreamEventEventThreadRunFailed, AssistantStreamEventEventThreadRunCancelling, AssistantStreamEventEventThreadRunCancelled, AssistantStreamEventEventThreadRunExpired, AssistantStreamEventEventThreadRunStepCreated, AssistantStreamEventEventThreadRunStepInProgress, AssistantStreamEventEventThreadRunStepDelta, AssistantStreamEventEventThreadRunStepCompleted, AssistantStreamEventEventThreadRunStepFailed, AssistantStreamEventEventThreadRunStepCancelled, AssistantStreamEventEventThreadRunStepExpired, AssistantStreamEventEventThreadMessageCreated, AssistantStreamEventEventThreadMessageInProgress, AssistantStreamEventEventThreadMessageDelta, AssistantStreamEventEventThreadMessageCompleted, AssistantStreamEventEventThreadMessageIncomplete, AssistantStreamEventEventError:
		return true
	}
	return false
}

// AssistantTool is a tagged-union container over the assistant tool
// definitions; the concrete variant is recovered with [AssistantTool.AsUnion].
type AssistantTool struct {
	// The type of tool being defined: `code_interpreter`
	Type AssistantToolType `json:"type,required"`
	// This field can have the runtime type of [FileSearchToolFileSearch].
	FileSearch interface{}               `json:"file_search,required"`
	Function   shared.FunctionDefinition `json:"function"`
	// Metadata for the raw JSON this struct was decoded from.
	JSON assistantToolJSON `json:"-"`
	// The decoded concrete variant; exposed through AsUnion.
	union AssistantToolUnion
}

// assistantToolJSON contains the JSON metadata for the struct [AssistantTool]
type assistantToolJSON struct {
	Type        apijson.Field
	FileSearch  apijson.Field
	Function    apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

// RawJSON returns the stored raw JSON string.
func (r assistantToolJSON) RawJSON() string {
	return r.raw
}

// UnmarshalJSON resets r, decodes the payload into the union variant, then
// copies (ports) the decoded variant's fields onto r via apijson.Port.
func (r *AssistantTool) UnmarshalJSON(data []byte) (err error) {
	*r = AssistantTool{}
	err = apijson.UnmarshalRoot(data, &r.union)
	if err != nil {
		return err
	}
	return apijson.Port(r.union, &r)
}

// AsUnion returns a [AssistantToolUnion] interface which you can cast to the
// specific types for more type safety.
//
// Possible runtime types of the union are [CodeInterpreterTool], [FileSearchTool],
// [FunctionTool].
func (r AssistantTool) AsUnion() AssistantToolUnion {
	return r.union
}

// Union satisfied by [CodeInterpreterTool], [FileSearchTool] or [FunctionTool].
type AssistantToolUnion interface {
	implementsAssistantTool()
}

// init registers the three concrete variants of [AssistantToolUnion] with the
// apijson union registry, discriminated by the JSON "type" field, so that
// decoding can pick the correct variant at runtime.
func init() {
	apijson.RegisterUnion(
		reflect.TypeOf((*AssistantToolUnion)(nil)).Elem(),
		"type",
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(CodeInterpreterTool{}),
			DiscriminatorValue: "code_interpreter",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(FileSearchTool{}),
			DiscriminatorValue: "file_search",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(FunctionTool{}),
			DiscriminatorValue: "function",
		},
	)
}

// The type of tool being defined: `code_interpreter`
type AssistantToolType string

const (
	AssistantToolTypeCodeInterpreter AssistantToolType = "code_interpreter"
	AssistantToolTypeFileSearch      AssistantToolType = "file_search"
	AssistantToolTypeFunction        AssistantToolType = "function"
)

// IsKnown reports whether r is one of the enumerated values above.
func (r AssistantToolType) IsKnown() bool {
	switch r {
	case AssistantToolTypeCodeInterpreter, AssistantToolTypeFileSearch, AssistantToolTypeFunction:
		return true
	}
	return false
}

// AssistantToolParam is the request-side counterpart of [AssistantTool]; the
// untyped FileSearch field mirrors the union's variant-specific payload.
type AssistantToolParam struct {
	// The type of tool being defined: `code_interpreter`
	Type       param.Field[AssistantToolType]                `json:"type,required"`
	FileSearch param.Field[interface{}]                      `json:"file_search,required"`
	Function   param.Field[shared.FunctionDefinitionParam]   `json:"function"`
}

// MarshalJSON encodes r via apijson.MarshalRoot.
func (r AssistantToolParam) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

// implementsAssistantToolUnionParam marks this type as a member of the
// AssistantToolUnionParam request union.
func (r AssistantToolParam) implementsAssistantToolUnionParam() {}

// Satisfied by [CodeInterpreterToolParam], [FileSearchToolParam],
// [FunctionToolParam], [AssistantToolParam].
+type AssistantToolUnionParam interface { + implementsAssistantToolUnionParam() +} + +type CodeInterpreterTool struct { + // The type of tool being defined: `code_interpreter` + Type CodeInterpreterToolType `json:"type,required"` + JSON codeInterpreterToolJSON `json:"-"` +} + +// codeInterpreterToolJSON contains the JSON metadata for the struct +// [CodeInterpreterTool] +type codeInterpreterToolJSON struct { + Type apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *CodeInterpreterTool) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r codeInterpreterToolJSON) RawJSON() string { + return r.raw +} + +func (r CodeInterpreterTool) implementsAssistantTool() {} + +func (r CodeInterpreterTool) implementsMessageAttachmentsTool() {} + +// The type of tool being defined: `code_interpreter` +type CodeInterpreterToolType string + +const ( + CodeInterpreterToolTypeCodeInterpreter CodeInterpreterToolType = "code_interpreter" +) + +func (r CodeInterpreterToolType) IsKnown() bool { + switch r { + case CodeInterpreterToolTypeCodeInterpreter: + return true + } + return false +} + +type CodeInterpreterToolParam struct { + // The type of tool being defined: `code_interpreter` + Type param.Field[CodeInterpreterToolType] `json:"type,required"` +} + +func (r CodeInterpreterToolParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r CodeInterpreterToolParam) implementsAssistantToolUnionParam() {} + +func (r CodeInterpreterToolParam) implementsBetaThreadNewParamsMessagesAttachmentsToolUnion() {} + +func (r CodeInterpreterToolParam) implementsBetaThreadNewAndRunParamsThreadMessagesAttachmentsToolUnion() { +} + +func (r CodeInterpreterToolParam) implementsBetaThreadNewAndRunParamsToolUnion() {} + +func (r CodeInterpreterToolParam) implementsBetaThreadRunNewParamsAdditionalMessagesAttachmentsToolUnion() { +} + +func (r CodeInterpreterToolParam) 
implementsBetaThreadMessageNewParamsAttachmentsToolUnion() {} + +type FileSearchTool struct { + // The type of tool being defined: `file_search` + Type FileSearchToolType `json:"type,required"` + // Overrides for the file search tool. + FileSearch FileSearchToolFileSearch `json:"file_search"` + JSON fileSearchToolJSON `json:"-"` +} + +// fileSearchToolJSON contains the JSON metadata for the struct [FileSearchTool] +type fileSearchToolJSON struct { + Type apijson.Field + FileSearch apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FileSearchTool) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r fileSearchToolJSON) RawJSON() string { + return r.raw +} + +func (r FileSearchTool) implementsAssistantTool() {} + +// The type of tool being defined: `file_search` +type FileSearchToolType string + +const ( + FileSearchToolTypeFileSearch FileSearchToolType = "file_search" +) + +func (r FileSearchToolType) IsKnown() bool { + switch r { + case FileSearchToolTypeFileSearch: + return true + } + return false +} + +// Overrides for the file search tool. +type FileSearchToolFileSearch struct { + // The maximum number of results the file search tool should output. The default is + // 20 for gpt-4\* models and 5 for gpt-3.5-turbo. This number should be between 1 + // and 50 inclusive. + // + // Note that the file search tool may output fewer than `max_num_results` results. + // See the + // [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) + // for more information. 
+ MaxNumResults int64 `json:"max_num_results"` + JSON fileSearchToolFileSearchJSON `json:"-"` +} + +// fileSearchToolFileSearchJSON contains the JSON metadata for the struct +// [FileSearchToolFileSearch] +type fileSearchToolFileSearchJSON struct { + MaxNumResults apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FileSearchToolFileSearch) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r fileSearchToolFileSearchJSON) RawJSON() string { + return r.raw +} + +type FileSearchToolParam struct { + // The type of tool being defined: `file_search` + Type param.Field[FileSearchToolType] `json:"type,required"` + // Overrides for the file search tool. + FileSearch param.Field[FileSearchToolFileSearchParam] `json:"file_search"` +} + +func (r FileSearchToolParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r FileSearchToolParam) implementsAssistantToolUnionParam() {} + +func (r FileSearchToolParam) implementsBetaThreadNewAndRunParamsToolUnion() {} + +// Overrides for the file search tool. +type FileSearchToolFileSearchParam struct { + // The maximum number of results the file search tool should output. The default is + // 20 for gpt-4\* models and 5 for gpt-3.5-turbo. This number should be between 1 + // and 50 inclusive. + // + // Note that the file search tool may output fewer than `max_num_results` results. + // See the + // [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) + // for more information. 
+ MaxNumResults param.Field[int64] `json:"max_num_results"` +} + +func (r FileSearchToolFileSearchParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type FunctionTool struct { + Function shared.FunctionDefinition `json:"function,required"` + // The type of tool being defined: `function` + Type FunctionToolType `json:"type,required"` + JSON functionToolJSON `json:"-"` +} + +// functionToolJSON contains the JSON metadata for the struct [FunctionTool] +type functionToolJSON struct { + Function apijson.Field + Type apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FunctionTool) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r functionToolJSON) RawJSON() string { + return r.raw +} + +func (r FunctionTool) implementsAssistantTool() {} + +// The type of tool being defined: `function` +type FunctionToolType string + +const ( + FunctionToolTypeFunction FunctionToolType = "function" +) + +func (r FunctionToolType) IsKnown() bool { + switch r { + case FunctionToolTypeFunction: + return true + } + return false +} + +type FunctionToolParam struct { + Function param.Field[shared.FunctionDefinitionParam] `json:"function,required"` + // The type of tool being defined: `function` + Type param.Field[FunctionToolType] `json:"type,required"` +} + +func (r FunctionToolParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r FunctionToolParam) implementsAssistantToolUnionParam() {} + +func (r FunctionToolParam) implementsBetaThreadNewAndRunParamsToolUnion() {} + +type BetaAssistantNewParams struct { + // ID of the model to use. You can use the + // [List models](https://platform.openai.com/docs/api-reference/models/list) API to + // see all of your available models, or see our + // [Model overview](https://platform.openai.com/docs/models/overview) for + // descriptions of them. 
+ Model param.Field[BetaAssistantNewParamsModel] `json:"model,required"` + // The description of the assistant. The maximum length is 512 characters. + Description param.Field[string] `json:"description"` + // The system instructions that the assistant uses. The maximum length is 256,000 + // characters. + Instructions param.Field[string] `json:"instructions"` + // Set of 16 key-value pairs that can be attached to an object. This can be useful + // for storing additional information about the object in a structured format. Keys + // can be a maximum of 64 characters long and values can be a maximum of 512 + // characters long. + Metadata param.Field[interface{}] `json:"metadata"` + // The name of the assistant. The maximum length is 256 characters. + Name param.Field[string] `json:"name"` + // Specifies the format that the model must output. Compatible with + // [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + // [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + // and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + // + // Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + // message the model generates is valid JSON. + // + // **Important:** when using JSON mode, you **must** also instruct the model to + // produce JSON yourself via a system or user message. Without this, the model may + // generate an unending stream of whitespace until the generation reaches the token + // limit, resulting in a long-running and seemingly "stuck" request. Also note that + // the message content may be partially cut off if `finish_reason="length"`, which + // indicates the generation exceeded `max_tokens` or the conversation exceeded the + // max context length. + ResponseFormat param.Field[AssistantResponseFormatOptionUnionParam] `json:"response_format"` + // What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will + // make the output more random, while lower values like 0.2 will make it more + // focused and deterministic. + Temperature param.Field[float64] `json:"temperature"` + // A set of resources that are used by the assistant's tools. The resources are + // specific to the type of tool. For example, the `code_interpreter` tool requires + // a list of file IDs, while the `file_search` tool requires a list of vector store + // IDs. + ToolResources param.Field[BetaAssistantNewParamsToolResources] `json:"tool_resources"` + // A list of tools enabled on the assistant. There can be a maximum of 128 tools per + // assistant. Tools can be of types `code_interpreter`, `file_search`, or + // `function`. + Tools param.Field[[]AssistantToolUnionParam] `json:"tools"` + // An alternative to sampling with temperature, called nucleus sampling, where the + // model considers the results of the tokens with top_p probability mass. So 0.1 + // means only the tokens comprising the top 10% probability mass are considered. + // + // We generally recommend altering this or temperature but not both. 
+ TopP param.Field[float64] `json:"top_p"` +} + +func (r BetaAssistantNewParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaAssistantNewParamsModel string + +const ( + BetaAssistantNewParamsModelGPT4o BetaAssistantNewParamsModel = "gpt-4o" + BetaAssistantNewParamsModelGPT4o2024_05_13 BetaAssistantNewParamsModel = "gpt-4o-2024-05-13" + BetaAssistantNewParamsModelGPT4oMini BetaAssistantNewParamsModel = "gpt-4o-mini" + BetaAssistantNewParamsModelGPT4oMini2024_07_18 BetaAssistantNewParamsModel = "gpt-4o-mini-2024-07-18" + BetaAssistantNewParamsModelGPT4Turbo BetaAssistantNewParamsModel = "gpt-4-turbo" + BetaAssistantNewParamsModelGPT4Turbo2024_04_09 BetaAssistantNewParamsModel = "gpt-4-turbo-2024-04-09" + BetaAssistantNewParamsModelGPT4_0125Preview BetaAssistantNewParamsModel = "gpt-4-0125-preview" + BetaAssistantNewParamsModelGPT4TurboPreview BetaAssistantNewParamsModel = "gpt-4-turbo-preview" + BetaAssistantNewParamsModelGPT4_1106Preview BetaAssistantNewParamsModel = "gpt-4-1106-preview" + BetaAssistantNewParamsModelGPT4VisionPreview BetaAssistantNewParamsModel = "gpt-4-vision-preview" + BetaAssistantNewParamsModelGPT4 BetaAssistantNewParamsModel = "gpt-4" + BetaAssistantNewParamsModelGPT4_0314 BetaAssistantNewParamsModel = "gpt-4-0314" + BetaAssistantNewParamsModelGPT4_0613 BetaAssistantNewParamsModel = "gpt-4-0613" + BetaAssistantNewParamsModelGPT4_32k BetaAssistantNewParamsModel = "gpt-4-32k" + BetaAssistantNewParamsModelGPT4_32k0314 BetaAssistantNewParamsModel = "gpt-4-32k-0314" + BetaAssistantNewParamsModelGPT4_32k0613 BetaAssistantNewParamsModel = "gpt-4-32k-0613" + BetaAssistantNewParamsModelGPT3_5Turbo BetaAssistantNewParamsModel = "gpt-3.5-turbo" + BetaAssistantNewParamsModelGPT3_5Turbo16k BetaAssistantNewParamsModel = "gpt-3.5-turbo-16k" + BetaAssistantNewParamsModelGPT3_5Turbo0613 BetaAssistantNewParamsModel = "gpt-3.5-turbo-0613" + BetaAssistantNewParamsModelGPT3_5Turbo1106 BetaAssistantNewParamsModel = 
"gpt-3.5-turbo-1106" + BetaAssistantNewParamsModelGPT3_5Turbo0125 BetaAssistantNewParamsModel = "gpt-3.5-turbo-0125" + BetaAssistantNewParamsModelGPT3_5Turbo16k0613 BetaAssistantNewParamsModel = "gpt-3.5-turbo-16k-0613" +) + +func (r BetaAssistantNewParamsModel) IsKnown() bool { + switch r { + case BetaAssistantNewParamsModelGPT4o, BetaAssistantNewParamsModelGPT4o2024_05_13, BetaAssistantNewParamsModelGPT4oMini, BetaAssistantNewParamsModelGPT4oMini2024_07_18, BetaAssistantNewParamsModelGPT4Turbo, BetaAssistantNewParamsModelGPT4Turbo2024_04_09, BetaAssistantNewParamsModelGPT4_0125Preview, BetaAssistantNewParamsModelGPT4TurboPreview, BetaAssistantNewParamsModelGPT4_1106Preview, BetaAssistantNewParamsModelGPT4VisionPreview, BetaAssistantNewParamsModelGPT4, BetaAssistantNewParamsModelGPT4_0314, BetaAssistantNewParamsModelGPT4_0613, BetaAssistantNewParamsModelGPT4_32k, BetaAssistantNewParamsModelGPT4_32k0314, BetaAssistantNewParamsModelGPT4_32k0613, BetaAssistantNewParamsModelGPT3_5Turbo, BetaAssistantNewParamsModelGPT3_5Turbo16k, BetaAssistantNewParamsModelGPT3_5Turbo0613, BetaAssistantNewParamsModelGPT3_5Turbo1106, BetaAssistantNewParamsModelGPT3_5Turbo0125, BetaAssistantNewParamsModelGPT3_5Turbo16k0613: + return true + } + return false +} + +// A set of resources that are used by the assistant's tools. The resources are +// specific to the type of tool. For example, the `code_interpreter` tool requires +// a list of file IDs, while the `file_search` tool requires a list of vector store +// IDs. 
+type BetaAssistantNewParamsToolResources struct { + CodeInterpreter param.Field[BetaAssistantNewParamsToolResourcesCodeInterpreter] `json:"code_interpreter"` + FileSearch param.Field[BetaAssistantNewParamsToolResourcesFileSearch] `json:"file_search"` +} + +func (r BetaAssistantNewParamsToolResources) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaAssistantNewParamsToolResourcesCodeInterpreter struct { + // A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + // available to the `code_interpreter` tool. There can be a maximum of 20 files + // associated with the tool. + FileIDs param.Field[[]string] `json:"file_ids"` +} + +func (r BetaAssistantNewParamsToolResourcesCodeInterpreter) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaAssistantNewParamsToolResourcesFileSearch struct { + // The + // [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + // attached to this assistant. There can be a maximum of 1 vector store attached to + // the assistant. + VectorStoreIDs param.Field[[]string] `json:"vector_store_ids"` + // A helper to create a + // [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + // with file_ids and attach it to this assistant. There can be a maximum of 1 + // vector store attached to the assistant. + VectorStores param.Field[[]BetaAssistantNewParamsToolResourcesFileSearchVectorStore] `json:"vector_stores"` +} + +func (r BetaAssistantNewParamsToolResourcesFileSearch) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaAssistantNewParamsToolResourcesFileSearchVectorStore struct { + // The chunking strategy used to chunk the file(s). If not set, will use the `auto` + // strategy. 
+ ChunkingStrategy param.Field[BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyUnion] `json:"chunking_strategy"` + // A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + // add to the vector store. There can be a maximum of 10000 files in a vector + // store. + FileIDs param.Field[[]string] `json:"file_ids"` + // Set of 16 key-value pairs that can be attached to a vector store. This can be + // useful for storing additional information about the vector store in a structured + // format. Keys can be a maximum of 64 characters long and values can be a maximum + // of 512 characters long. + Metadata param.Field[interface{}] `json:"metadata"` +} + +func (r BetaAssistantNewParamsToolResourcesFileSearchVectorStore) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// The chunking strategy used to chunk the file(s). If not set, will use the `auto` +// strategy. +type BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategy struct { + // Always `auto`. + Type param.Field[BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyType] `json:"type,required"` + Static param.Field[interface{}] `json:"static,required"` +} + +func (r BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategy) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategy) implementsBetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyUnion() { +} + +// The chunking strategy used to chunk the file(s). If not set, will use the `auto` +// strategy. +// +// Satisfied by +// [BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAuto], +// [BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStatic], +// [BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategy]. 
+type BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyUnion interface { + implementsBetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyUnion() +} + +// The default strategy. This strategy currently uses a `max_chunk_size_tokens` of +// `800` and `chunk_overlap_tokens` of `400`. +type BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAuto struct { + // Always `auto`. + Type param.Field[BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAutoType] `json:"type,required"` +} + +func (r BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAuto) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAuto) implementsBetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyUnion() { +} + +// Always `auto`. +type BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAutoType string + +const ( + BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAutoTypeAuto BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAutoType = "auto" +) + +func (r BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAutoType) IsKnown() bool { + switch r { + case BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAutoTypeAuto: + return true + } + return false +} + +type BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStatic struct { + Static param.Field[BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStaticStatic] `json:"static,required"` + // Always `static`. 
+ Type param.Field[BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStaticType] `json:"type,required"` +} + +func (r BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStatic) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStatic) implementsBetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyUnion() { +} + +type BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStaticStatic struct { + // The number of tokens that overlap between chunks. The default value is `400`. + // + // Note that the overlap must not exceed half of `max_chunk_size_tokens`. + ChunkOverlapTokens param.Field[int64] `json:"chunk_overlap_tokens,required"` + // The maximum number of tokens in each chunk. The default value is `800`. The + // minimum value is `100` and the maximum value is `4096`. + MaxChunkSizeTokens param.Field[int64] `json:"max_chunk_size_tokens,required"` +} + +func (r BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStaticStatic) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// Always `static`. +type BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStaticType string + +const ( + BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStaticTypeStatic BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStaticType = "static" +) + +func (r BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStaticType) IsKnown() bool { + switch r { + case BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStaticTypeStatic: + return true + } + return false +} + +// Always `auto`. 
+type BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyType string + +const ( + BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyTypeAuto BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyType = "auto" + BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyTypeStatic BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyType = "static" +) + +func (r BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyType) IsKnown() bool { + switch r { + case BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyTypeAuto, BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyTypeStatic: + return true + } + return false +} + +type BetaAssistantUpdateParams struct { + // The description of the assistant. The maximum length is 512 characters. + Description param.Field[string] `json:"description"` + // The system instructions that the assistant uses. The maximum length is 256,000 + // characters. + Instructions param.Field[string] `json:"instructions"` + // Set of 16 key-value pairs that can be attached to an object. This can be useful + // for storing additional information about the object in a structured format. Keys + // can be a maximum of 64 characters long and values can be a maximum of 512 + // characters long. + Metadata param.Field[interface{}] `json:"metadata"` + // ID of the model to use. You can use the + // [List models](https://platform.openai.com/docs/api-reference/models/list) API to + // see all of your available models, or see our + // [Model overview](https://platform.openai.com/docs/models/overview) for + // descriptions of them. + Model param.Field[string] `json:"model"` + // The name of the assistant. The maximum length is 256 characters. + Name param.Field[string] `json:"name"` + // Specifies the format that the model must output. 
Compatible with + // [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + // [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + // and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + // + // Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + // message the model generates is valid JSON. + // + // **Important:** when using JSON mode, you **must** also instruct the model to + // produce JSON yourself via a system or user message. Without this, the model may + // generate an unending stream of whitespace until the generation reaches the token + // limit, resulting in a long-running and seemingly "stuck" request. Also note that + // the message content may be partially cut off if `finish_reason="length"`, which + // indicates the generation exceeded `max_tokens` or the conversation exceeded the + // max context length. + ResponseFormat param.Field[AssistantResponseFormatOptionUnionParam] `json:"response_format"` + // What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + // make the output more random, while lower values like 0.2 will make it more + // focused and deterministic. + Temperature param.Field[float64] `json:"temperature"` + // A set of resources that are used by the assistant's tools. The resources are + // specific to the type of tool. For example, the `code_interpreter` tool requires + // a list of file IDs, while the `file_search` tool requires a list of vector store + // IDs. + ToolResources param.Field[BetaAssistantUpdateParamsToolResources] `json:"tool_resources"` + // A list of tools enabled on the assistant. There can be a maximum of 128 tools per + // assistant. Tools can be of types `code_interpreter`, `file_search`, or + // `function`. 
+ Tools param.Field[[]AssistantToolUnionParam] `json:"tools"` + // An alternative to sampling with temperature, called nucleus sampling, where the + // model considers the results of the tokens with top_p probability mass. So 0.1 + // means only the tokens comprising the top 10% probability mass are considered. + // + // We generally recommend altering this or temperature but not both. + TopP param.Field[float64] `json:"top_p"` +} + +func (r BetaAssistantUpdateParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// A set of resources that are used by the assistant's tools. The resources are +// specific to the type of tool. For example, the `code_interpreter` tool requires +// a list of file IDs, while the `file_search` tool requires a list of vector store +// IDs. +type BetaAssistantUpdateParamsToolResources struct { + CodeInterpreter param.Field[BetaAssistantUpdateParamsToolResourcesCodeInterpreter] `json:"code_interpreter"` + FileSearch param.Field[BetaAssistantUpdateParamsToolResourcesFileSearch] `json:"file_search"` +} + +func (r BetaAssistantUpdateParamsToolResources) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaAssistantUpdateParamsToolResourcesCodeInterpreter struct { + // Overrides the list of + // [file](https://platform.openai.com/docs/api-reference/files) IDs made available + // to the `code_interpreter` tool. There can be a maximum of 20 files associated + // with the tool. + FileIDs param.Field[[]string] `json:"file_ids"` +} + +func (r BetaAssistantUpdateParamsToolResourcesCodeInterpreter) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaAssistantUpdateParamsToolResourcesFileSearch struct { + // Overrides the + // [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + // attached to this assistant. There can be a maximum of 1 vector store attached to + // the assistant. 
+ VectorStoreIDs param.Field[[]string] `json:"vector_store_ids"` +} + +func (r BetaAssistantUpdateParamsToolResourcesFileSearch) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaAssistantListParams struct { + // A cursor for use in pagination. `after` is an object ID that defines your place + // in the list. For instance, if you make a list request and receive 100 objects, + // ending with obj_foo, your subsequent call can include after=obj_foo in order to + // fetch the next page of the list. + After param.Field[string] `query:"after"` + // A cursor for use in pagination. `before` is an object ID that defines your place + // in the list. For instance, if you make a list request and receive 100 objects, + // ending with obj_foo, your subsequent call can include before=obj_foo in order to + // fetch the previous page of the list. + Before param.Field[string] `query:"before"` + // A limit on the number of objects to be returned. Limit can range between 1 and + // 100, and the default is 20. + Limit param.Field[int64] `query:"limit"` + // Sort order by the `created_at` timestamp of the objects. `asc` for ascending + // order and `desc` for descending order. + Order param.Field[BetaAssistantListParamsOrder] `query:"order"` +} + +// URLQuery serializes [BetaAssistantListParams]'s query parameters as +// `url.Values`. +func (r BetaAssistantListParams) URLQuery() (v url.Values) { + return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ + ArrayFormat: apiquery.ArrayQueryFormatComma, + NestedFormat: apiquery.NestedQueryFormatBrackets, + }) +} + +// Sort order by the `created_at` timestamp of the objects. `asc` for ascending +// order and `desc` for descending order. 
+type BetaAssistantListParamsOrder string + +const ( + BetaAssistantListParamsOrderAsc BetaAssistantListParamsOrder = "asc" + BetaAssistantListParamsOrderDesc BetaAssistantListParamsOrder = "desc" +) + +func (r BetaAssistantListParamsOrder) IsKnown() bool { + switch r { + case BetaAssistantListParamsOrderAsc, BetaAssistantListParamsOrderDesc: + return true + } + return false +} diff --git a/betaassistant_test.go b/betaassistant_test.go new file mode 100644 index 0000000..1c6f29f --- /dev/null +++ b/betaassistant_test.go @@ -0,0 +1,188 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" +) + +func TestBetaAssistantNewWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Assistants.New(context.TODO(), openai.BetaAssistantNewParams{ + Model: openai.F(openai.BetaAssistantNewParamsModelGPT4o), + Description: openai.F("description"), + Instructions: openai.F("instructions"), + Metadata: openai.F[any](map[string]interface{}{}), + Name: openai.F("name"), + ResponseFormat: openai.F[openai.AssistantResponseFormatOptionUnionParam](openai.AssistantResponseFormatOptionString(openai.AssistantResponseFormatOptionStringNone)), + Temperature: openai.F(1.000000), + ToolResources: openai.F(openai.BetaAssistantNewParamsToolResources{ + CodeInterpreter: openai.F(openai.BetaAssistantNewParamsToolResourcesCodeInterpreter{ + FileIDs: openai.F([]string{"string", "string", "string"}), + }), + FileSearch: openai.F(openai.BetaAssistantNewParamsToolResourcesFileSearch{ + 
VectorStoreIDs: openai.F([]string{"string"}), + VectorStores: openai.F([]openai.BetaAssistantNewParamsToolResourcesFileSearchVectorStore{{ + FileIDs: openai.F([]string{"string", "string", "string"}), + ChunkingStrategy: openai.F[openai.BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyUnion](openai.BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAuto{ + Type: openai.F(openai.BetaAssistantNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAutoTypeAuto), + }), + Metadata: openai.F[any](map[string]interface{}{}), + }}), + }), + }), + Tools: openai.F([]openai.AssistantToolUnionParam{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + TopP: openai.F(1.000000), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaAssistantGet(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Assistants.Get(context.TODO(), "assistant_id") + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaAssistantUpdateWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := 
openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Assistants.Update( + context.TODO(), + "assistant_id", + openai.BetaAssistantUpdateParams{ + Description: openai.F("description"), + Instructions: openai.F("instructions"), + Metadata: openai.F[any](map[string]interface{}{}), + Model: openai.F("model"), + Name: openai.F("name"), + ResponseFormat: openai.F[openai.AssistantResponseFormatOptionUnionParam](openai.AssistantResponseFormatOptionString(openai.AssistantResponseFormatOptionStringNone)), + Temperature: openai.F(1.000000), + ToolResources: openai.F(openai.BetaAssistantUpdateParamsToolResources{ + CodeInterpreter: openai.F(openai.BetaAssistantUpdateParamsToolResourcesCodeInterpreter{ + FileIDs: openai.F([]string{"string", "string", "string"}), + }), + FileSearch: openai.F(openai.BetaAssistantUpdateParamsToolResourcesFileSearch{ + VectorStoreIDs: openai.F([]string{"string"}), + }), + }), + Tools: openai.F([]openai.AssistantToolUnionParam{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + TopP: openai.F(1.000000), + }, + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaAssistantListWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Assistants.List(context.TODO(), openai.BetaAssistantListParams{ + After: openai.F("after"), 
+ Before: openai.F("before"), + Limit: openai.F(int64(0)), + Order: openai.F(openai.BetaAssistantListParamsOrderAsc), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaAssistantDelete(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Assistants.Delete(context.TODO(), "assistant_id") + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/betathread.go b/betathread.go new file mode 100644 index 0000000..0c5f281 --- /dev/null +++ b/betathread.go @@ -0,0 +1,1498 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai + +import ( + "context" + "errors" + "fmt" + "net/http" + "reflect" + + "github.com/openai/openai-go/internal/apijson" + "github.com/openai/openai-go/internal/param" + "github.com/openai/openai-go/internal/requestconfig" + "github.com/openai/openai-go/option" + "github.com/openai/openai-go/packages/ssestream" + "github.com/openai/openai-go/shared" + "github.com/tidwall/gjson" +) + +// BetaThreadService contains methods and other services that help with interacting +// with the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewBetaThreadService] method instead. 
+type BetaThreadService struct { + Options []option.RequestOption + Runs *BetaThreadRunService + Messages *BetaThreadMessageService +} + +// NewBetaThreadService generates a new service that applies the given options to +// each request. These options are applied after the parent client's options (if +// there is one), and before any request-specific options. +func NewBetaThreadService(opts ...option.RequestOption) (r *BetaThreadService) { + r = &BetaThreadService{} + r.Options = opts + r.Runs = NewBetaThreadRunService(opts...) + r.Messages = NewBetaThreadMessageService(opts...) + return +} + +// Create a thread. +func (r *BetaThreadService) New(ctx context.Context, body BetaThreadNewParams, opts ...option.RequestOption) (res *Thread, err error) { + opts = append(r.Options[:], opts...) + path := "threads" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Retrieves a thread. +func (r *BetaThreadService) Get(ctx context.Context, threadID string, opts ...option.RequestOption) (res *Thread, err error) { + opts = append(r.Options[:], opts...) + if threadID == "" { + err = errors.New("missing required thread_id parameter") + return + } + path := fmt.Sprintf("threads/%s", threadID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +// Modifies a thread. +func (r *BetaThreadService) Update(ctx context.Context, threadID string, body BetaThreadUpdateParams, opts ...option.RequestOption) (res *Thread, err error) { + opts = append(r.Options[:], opts...) + if threadID == "" { + err = errors.New("missing required thread_id parameter") + return + } + path := fmt.Sprintf("threads/%s", threadID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Delete a thread. 
func (r *BetaThreadService) Delete(ctx context.Context, threadID string, opts ...option.RequestOption) (res *ThreadDeleted, err error) {
	opts = append(r.Options[:], opts...)
	// Guard against an empty path parameter so the request never targets "threads/".
	if threadID == "" {
		err = errors.New("missing required thread_id parameter")
		return
	}
	path := fmt.Sprintf("threads/%s", threadID)
	err = requestconfig.ExecuteNewRequest(ctx, http.MethodDelete, path, nil, &res, opts...)
	return
}

// Create a thread and run it in one request.
func (r *BetaThreadService) NewAndRun(ctx context.Context, body BetaThreadNewAndRunParams, opts ...option.RequestOption) (res *Run, err error) {
	opts = append(r.Options[:], opts...)
	path := "threads/runs"
	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...)
	return
}

// Create a thread and run it in one request.
func (r *BetaThreadService) NewAndRunStreaming(ctx context.Context, body BetaThreadNewAndRunParams, opts ...option.RequestOption) (stream *ssestream.Stream[AssistantStreamEvent]) {
	var (
		raw *http.Response
		err error
	)
	opts = append(r.Options[:], opts...)
	// Force `"stream": true` onto the JSON body so the API responds with
	// server-sent events instead of a single Run object.
	opts = append([]option.RequestOption{option.WithJSONSet("stream", true)}, opts...)
	path := "threads/runs"
	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &raw, opts...)
	// Any request error is surfaced through the returned stream, not here.
	return ssestream.NewStream[AssistantStreamEvent](ssestream.NewDecoder(raw), err)
}

// An object describing the expected output of the model. If `json_object` only
// `function` type `tools` are allowed to be passed to the Run. If `text` the model
// can return text or any value needed.
type AssistantResponseFormat struct {
	// Must be one of `text` or `json_object`.
	Type AssistantResponseFormatType `json:"type"`
	JSON assistantResponseFormatJSON `json:"-"`
}

// assistantResponseFormatJSON contains the JSON metadata for the struct
// [AssistantResponseFormat]
type assistantResponseFormatJSON struct {
	Type        apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *AssistantResponseFormat) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r assistantResponseFormatJSON) RawJSON() string {
	return r.raw
}

func (r AssistantResponseFormat) implementsAssistantResponseFormatOptionUnion() {}

// Must be one of `text` or `json_object`.
type AssistantResponseFormatType string

const (
	AssistantResponseFormatTypeText       AssistantResponseFormatType = "text"
	AssistantResponseFormatTypeJSONObject AssistantResponseFormatType = "json_object"
)

// IsKnown reports whether r matches one of the enum values declared above; any
// other string, including values the API may add later, yields false.
func (r AssistantResponseFormatType) IsKnown() bool {
	switch r {
	case AssistantResponseFormatTypeText, AssistantResponseFormatTypeJSONObject:
		return true
	}
	return false
}

// An object describing the expected output of the model. If `json_object` only
// `function` type `tools` are allowed to be passed to the Run. If `text` the model
// can return text or any value needed.
type AssistantResponseFormatParam struct {
	// Must be one of `text` or `json_object`.
	Type param.Field[AssistantResponseFormatType] `json:"type"`
}

func (r AssistantResponseFormatParam) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

func (r AssistantResponseFormatParam) implementsAssistantResponseFormatOptionUnionParam() {}

// Specifies the format that the model must output. Compatible with
// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
// and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
//
// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
// message the model generates is valid JSON.
//
// **Important:** when using JSON mode, you **must** also instruct the model to
// produce JSON yourself via a system or user message. Without this, the model may
// generate an unending stream of whitespace until the generation reaches the token
// limit, resulting in a long-running and seemingly "stuck" request. Also note that
// the message content may be partially cut off if `finish_reason="length"`, which
// indicates the generation exceeded `max_tokens` or the conversation exceeded the
// max context length.
//
// Union satisfied by [AssistantResponseFormatOptionString] or
// [AssistantResponseFormat].
type AssistantResponseFormatOptionUnion interface {
	implementsAssistantResponseFormatOptionUnion()
}

// Registers the two wire shapes of the response-format union: a bare JSON
// string decodes into AssistantResponseFormatOptionString, any structured JSON
// value into AssistantResponseFormat.
func init() {
	apijson.RegisterUnion(
		reflect.TypeOf((*AssistantResponseFormatOptionUnion)(nil)).Elem(),
		"",
		apijson.UnionVariant{
			TypeFilter: gjson.String,
			Type:       reflect.TypeOf(AssistantResponseFormatOptionString("")),
		},
		apijson.UnionVariant{
			TypeFilter: gjson.JSON,
			Type:       reflect.TypeOf(AssistantResponseFormat{}),
		},
	)
}

// `auto` is the default value.
type AssistantResponseFormatOptionString string

const (
	AssistantResponseFormatOptionStringNone AssistantResponseFormatOptionString = "none"
	AssistantResponseFormatOptionStringAuto AssistantResponseFormatOptionString = "auto"
)

func (r AssistantResponseFormatOptionString) IsKnown() bool {
	switch r {
	case AssistantResponseFormatOptionStringNone, AssistantResponseFormatOptionStringAuto:
		return true
	}
	return false
}

func (r AssistantResponseFormatOptionString) implementsAssistantResponseFormatOptionUnion() {}

func (r AssistantResponseFormatOptionString) implementsAssistantResponseFormatOptionUnionParam() {}

// Specifies the format that the model must output. Compatible with
// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
// and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
//
// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
// message the model generates is valid JSON.
//
// **Important:** when using JSON mode, you **must** also instruct the model to
// produce JSON yourself via a system or user message. Without this, the model may
// generate an unending stream of whitespace until the generation reaches the token
// limit, resulting in a long-running and seemingly "stuck" request. Also note that
// the message content may be partially cut off if `finish_reason="length"`, which
// indicates the generation exceeded `max_tokens` or the conversation exceeded the
// max context length.
//
// Satisfied by [AssistantResponseFormatOptionString],
// [AssistantResponseFormatParam].
type AssistantResponseFormatOptionUnionParam interface {
	implementsAssistantResponseFormatOptionUnionParam()
}

// Specifies a tool the model should use. Use to force the model to call a specific
// tool.
type AssistantToolChoice struct {
	// The type of the tool. If type is `function`, the function name must be set.
	Type     AssistantToolChoiceType     `json:"type,required"`
	Function AssistantToolChoiceFunction `json:"function"`
	JSON     assistantToolChoiceJSON     `json:"-"`
}

// assistantToolChoiceJSON contains the JSON metadata for the struct
// [AssistantToolChoice]
type assistantToolChoiceJSON struct {
	Type        apijson.Field
	Function    apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *AssistantToolChoice) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r assistantToolChoiceJSON) RawJSON() string {
	return r.raw
}

func (r AssistantToolChoice) implementsAssistantToolChoiceOptionUnion() {}

// The type of the tool. If type is `function`, the function name must be set.
type AssistantToolChoiceType string

const (
	AssistantToolChoiceTypeFunction        AssistantToolChoiceType = "function"
	AssistantToolChoiceTypeCodeInterpreter AssistantToolChoiceType = "code_interpreter"
	AssistantToolChoiceTypeFileSearch      AssistantToolChoiceType = "file_search"
)

func (r AssistantToolChoiceType) IsKnown() bool {
	switch r {
	case AssistantToolChoiceTypeFunction, AssistantToolChoiceTypeCodeInterpreter, AssistantToolChoiceTypeFileSearch:
		return true
	}
	return false
}

// Specifies a tool the model should use. Use to force the model to call a specific
// tool.
type AssistantToolChoiceParam struct {
	// The type of the tool. If type is `function`, the function name must be set.
	Type     param.Field[AssistantToolChoiceType]          `json:"type,required"`
	Function param.Field[AssistantToolChoiceFunctionParam] `json:"function"`
}

func (r AssistantToolChoiceParam) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

func (r AssistantToolChoiceParam) implementsAssistantToolChoiceOptionUnionParam() {}

type AssistantToolChoiceFunction struct {
	// The name of the function to call.
	Name string                          `json:"name,required"`
	JSON assistantToolChoiceFunctionJSON `json:"-"`
}

// assistantToolChoiceFunctionJSON contains the JSON metadata for the struct
// [AssistantToolChoiceFunction]
type assistantToolChoiceFunctionJSON struct {
	Name        apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *AssistantToolChoiceFunction) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r assistantToolChoiceFunctionJSON) RawJSON() string {
	return r.raw
}

type AssistantToolChoiceFunctionParam struct {
	// The name of the function to call.
	Name param.Field[string] `json:"name,required"`
}

func (r AssistantToolChoiceFunctionParam) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

// Controls which (if any) tool is called by the model. `none` means the model will
// not call any tools and instead generates a message. `auto` is the default value
// and means the model can pick between generating a message or calling one or more
// tools. `required` means the model must call one or more tools before responding
// to the user. Specifying a particular tool like `{"type": "file_search"}` or
// `{"type": "function", "function": {"name": "my_function"}}` forces the model to
// call that tool.
//
// Union satisfied by [AssistantToolChoiceOptionString] or [AssistantToolChoice].
type AssistantToolChoiceOptionUnion interface {
	implementsAssistantToolChoiceOptionUnion()
}

// Registers the two wire shapes of the tool-choice union: a bare JSON string
// decodes into AssistantToolChoiceOptionString, any structured JSON value into
// AssistantToolChoice.
func init() {
	apijson.RegisterUnion(
		reflect.TypeOf((*AssistantToolChoiceOptionUnion)(nil)).Elem(),
		"",
		apijson.UnionVariant{
			TypeFilter: gjson.String,
			Type:       reflect.TypeOf(AssistantToolChoiceOptionString("")),
		},
		apijson.UnionVariant{
			TypeFilter: gjson.JSON,
			Type:       reflect.TypeOf(AssistantToolChoice{}),
		},
	)
}

// `none` means the model will not call any tools and instead generates a message.
// `auto` means the model can pick between generating a message or calling one or
// more tools. `required` means the model must call one or more tools before
// responding to the user.
type AssistantToolChoiceOptionString string

const (
	AssistantToolChoiceOptionStringNone     AssistantToolChoiceOptionString = "none"
	AssistantToolChoiceOptionStringAuto     AssistantToolChoiceOptionString = "auto"
	AssistantToolChoiceOptionStringRequired AssistantToolChoiceOptionString = "required"
)

func (r AssistantToolChoiceOptionString) IsKnown() bool {
	switch r {
	case AssistantToolChoiceOptionStringNone, AssistantToolChoiceOptionStringAuto, AssistantToolChoiceOptionStringRequired:
		return true
	}
	return false
}

func (r AssistantToolChoiceOptionString) implementsAssistantToolChoiceOptionUnion() {}

func (r AssistantToolChoiceOptionString) implementsAssistantToolChoiceOptionUnionParam() {}

// Controls which (if any) tool is called by the model. `none` means the model will
// not call any tools and instead generates a message. `auto` is the default value
// and means the model can pick between generating a message or calling one or more
// tools. `required` means the model must call one or more tools before responding
// to the user. Specifying a particular tool like `{"type": "file_search"}` or
// `{"type": "function", "function": {"name": "my_function"}}` forces the model to
// call that tool.
//
// Satisfied by [AssistantToolChoiceOptionString], [AssistantToolChoiceParam].
type AssistantToolChoiceOptionUnionParam interface {
	implementsAssistantToolChoiceOptionUnionParam()
}

// Represents a thread that contains
// [messages](https://platform.openai.com/docs/api-reference/messages).
type Thread struct {
	// The identifier, which can be referenced in API endpoints.
	ID string `json:"id,required"`
	// The Unix timestamp (in seconds) for when the thread was created.
	CreatedAt int64 `json:"created_at,required"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata interface{} `json:"metadata,required,nullable"`
	// The object type, which is always `thread`.
	Object ThreadObject `json:"object,required"`
	// A set of resources that are made available to the assistant's tools in this
	// thread. The resources are specific to the type of tool. For example, the
	// `code_interpreter` tool requires a list of file IDs, while the `file_search`
	// tool requires a list of vector store IDs.
	ToolResources ThreadToolResources `json:"tool_resources,required,nullable"`
	JSON          threadJSON          `json:"-"`
}

// threadJSON contains the JSON metadata for the struct [Thread]
type threadJSON struct {
	ID            apijson.Field
	CreatedAt     apijson.Field
	Metadata      apijson.Field
	Object        apijson.Field
	ToolResources apijson.Field
	raw           string
	ExtraFields   map[string]apijson.Field
}

func (r *Thread) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r threadJSON) RawJSON() string {
	return r.raw
}

// The object type, which is always `thread`.
type ThreadObject string

const (
	ThreadObjectThread ThreadObject = "thread"
)

func (r ThreadObject) IsKnown() bool {
	switch r {
	case ThreadObjectThread:
		return true
	}
	return false
}

// A set of resources that are made available to the assistant's tools in this
// thread. The resources are specific to the type of tool. For example, the
// `code_interpreter` tool requires a list of file IDs, while the `file_search`
// tool requires a list of vector store IDs.
type ThreadToolResources struct {
	CodeInterpreter ThreadToolResourcesCodeInterpreter `json:"code_interpreter"`
	FileSearch      ThreadToolResourcesFileSearch      `json:"file_search"`
	JSON            threadToolResourcesJSON            `json:"-"`
}

// threadToolResourcesJSON contains the JSON metadata for the struct
// [ThreadToolResources]
type threadToolResourcesJSON struct {
	CodeInterpreter apijson.Field
	FileSearch      apijson.Field
	raw             string
	ExtraFields     map[string]apijson.Field
}

func (r *ThreadToolResources) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r threadToolResourcesJSON) RawJSON() string {
	return r.raw
}

type ThreadToolResourcesCodeInterpreter struct {
	// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
	// available to the `code_interpreter` tool. There can be a maximum of 20 files
	// associated with the tool.
	FileIDs []string                               `json:"file_ids"`
	JSON    threadToolResourcesCodeInterpreterJSON `json:"-"`
}

// threadToolResourcesCodeInterpreterJSON contains the JSON metadata for the struct
// [ThreadToolResourcesCodeInterpreter]
type threadToolResourcesCodeInterpreterJSON struct {
	FileIDs     apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *ThreadToolResourcesCodeInterpreter) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r threadToolResourcesCodeInterpreterJSON) RawJSON() string {
	return r.raw
}

type ThreadToolResourcesFileSearch struct {
	// The
	// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
	// attached to this thread. There can be a maximum of 1 vector store attached to
	// the thread.
	VectorStoreIDs []string                          `json:"vector_store_ids"`
	JSON           threadToolResourcesFileSearchJSON `json:"-"`
}

// threadToolResourcesFileSearchJSON contains the JSON metadata for the struct
// [ThreadToolResourcesFileSearch]
type threadToolResourcesFileSearchJSON struct {
	VectorStoreIDs apijson.Field
	raw            string
	ExtraFields    map[string]apijson.Field
}

func (r *ThreadToolResourcesFileSearch) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r threadToolResourcesFileSearchJSON) RawJSON() string {
	return r.raw
}

// ThreadDeleted is the API's acknowledgement of a thread deletion.
type ThreadDeleted struct {
	ID      string              `json:"id,required"`
	Deleted bool                `json:"deleted,required"`
	Object  ThreadDeletedObject `json:"object,required"`
	JSON    threadDeletedJSON   `json:"-"`
}

// threadDeletedJSON contains the JSON metadata for the struct [ThreadDeleted]
type threadDeletedJSON struct {
	ID          apijson.Field
	Deleted     apijson.Field
	Object      apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *ThreadDeleted) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

func (r threadDeletedJSON) RawJSON() string {
	return r.raw
}

type ThreadDeletedObject string

const (
	ThreadDeletedObjectThreadDeleted ThreadDeletedObject = "thread.deleted"
)

func (r ThreadDeletedObject) IsKnown() bool {
	switch r {
	case ThreadDeletedObjectThreadDeleted:
		return true
	}
	return false
}

type BetaThreadNewParams struct {
	// A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
	// start the thread with.
	Messages param.Field[[]BetaThreadNewParamsMessage] `json:"messages"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
	// A set of resources that are made available to the assistant's tools in this
	// thread. The resources are specific to the type of tool. For example, the
	// `code_interpreter` tool requires a list of file IDs, while the `file_search`
	// tool requires a list of vector store IDs.
	ToolResources param.Field[BetaThreadNewParamsToolResources] `json:"tool_resources"`
}

func (r BetaThreadNewParams) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

type BetaThreadNewParamsMessage struct {
	// The text contents of the message.
	Content param.Field[BetaThreadNewParamsMessagesContentUnion] `json:"content,required"`
	// The role of the entity that is creating the message. Allowed values include:
	//
	// - `user`: Indicates the message is sent by an actual user and should be used in
	//   most cases to represent user-generated messages.
	// - `assistant`: Indicates the message is generated by the assistant. Use this
	//   value to insert messages from the assistant into the conversation.
	Role param.Field[BetaThreadNewParamsMessagesRole] `json:"role,required"`
	// A list of files attached to the message, and the tools they should be added to.
	Attachments param.Field[[]BetaThreadNewParamsMessagesAttachment] `json:"attachments"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
}

func (r BetaThreadNewParamsMessage) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

// The text contents of the message.
//
// Satisfied by [shared.UnionString],
// [BetaThreadNewParamsMessagesContentArrayOfContentParts].
type BetaThreadNewParamsMessagesContentUnion interface {
	ImplementsBetaThreadNewParamsMessagesContentUnion()
}

type BetaThreadNewParamsMessagesContentArrayOfContentParts []MessageContentPartParamUnion

func (r BetaThreadNewParamsMessagesContentArrayOfContentParts) ImplementsBetaThreadNewParamsMessagesContentUnion() {
}

// The role of the entity that is creating the message. Allowed values include:
//
// - `user`: Indicates the message is sent by an actual user and should be used in
//   most cases to represent user-generated messages.
// - `assistant`: Indicates the message is generated by the assistant. Use this
//   value to insert messages from the assistant into the conversation.
type BetaThreadNewParamsMessagesRole string

const (
	BetaThreadNewParamsMessagesRoleUser      BetaThreadNewParamsMessagesRole = "user"
	BetaThreadNewParamsMessagesRoleAssistant BetaThreadNewParamsMessagesRole = "assistant"
)

func (r BetaThreadNewParamsMessagesRole) IsKnown() bool {
	switch r {
	case BetaThreadNewParamsMessagesRoleUser, BetaThreadNewParamsMessagesRoleAssistant:
		return true
	}
	return false
}

type BetaThreadNewParamsMessagesAttachment struct {
	// The ID of the file to attach to the message.
	FileID param.Field[string] `json:"file_id"`
	// The tools to add this file to.
	Tools param.Field[[]BetaThreadNewParamsMessagesAttachmentsToolUnion] `json:"tools"`
}

func (r BetaThreadNewParamsMessagesAttachment) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

type BetaThreadNewParamsMessagesAttachmentsTool struct {
	// The type of tool being defined: `code_interpreter` or `file_search` (see
	// [BetaThreadNewParamsMessagesAttachmentsToolsType]).
	Type param.Field[BetaThreadNewParamsMessagesAttachmentsToolsType] `json:"type,required"`
}

func (r BetaThreadNewParamsMessagesAttachmentsTool) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

func (r BetaThreadNewParamsMessagesAttachmentsTool) implementsBetaThreadNewParamsMessagesAttachmentsToolUnion() {
}

// Satisfied by [CodeInterpreterToolParam],
// [BetaThreadNewParamsMessagesAttachmentsToolsFileSearch],
// [BetaThreadNewParamsMessagesAttachmentsTool].
type BetaThreadNewParamsMessagesAttachmentsToolUnion interface {
	implementsBetaThreadNewParamsMessagesAttachmentsToolUnion()
}

type BetaThreadNewParamsMessagesAttachmentsToolsFileSearch struct {
	// The type of tool being defined: `file_search`
	Type param.Field[BetaThreadNewParamsMessagesAttachmentsToolsFileSearchType] `json:"type,required"`
}

func (r BetaThreadNewParamsMessagesAttachmentsToolsFileSearch) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

func (r BetaThreadNewParamsMessagesAttachmentsToolsFileSearch) implementsBetaThreadNewParamsMessagesAttachmentsToolUnion() {
}

// The type of tool being defined: `file_search`
type BetaThreadNewParamsMessagesAttachmentsToolsFileSearchType string

const (
	BetaThreadNewParamsMessagesAttachmentsToolsFileSearchTypeFileSearch BetaThreadNewParamsMessagesAttachmentsToolsFileSearchType = "file_search"
)

func (r BetaThreadNewParamsMessagesAttachmentsToolsFileSearchType) IsKnown() bool {
	switch r {
	case BetaThreadNewParamsMessagesAttachmentsToolsFileSearchTypeFileSearch:
		return true
	}
	return false
}

// The type of tool being defined: `code_interpreter` or `file_search`.
type BetaThreadNewParamsMessagesAttachmentsToolsType string

const (
	BetaThreadNewParamsMessagesAttachmentsToolsTypeCodeInterpreter BetaThreadNewParamsMessagesAttachmentsToolsType = "code_interpreter"
	BetaThreadNewParamsMessagesAttachmentsToolsTypeFileSearch      BetaThreadNewParamsMessagesAttachmentsToolsType = "file_search"
)

func (r BetaThreadNewParamsMessagesAttachmentsToolsType) IsKnown() bool {
	switch r {
	case BetaThreadNewParamsMessagesAttachmentsToolsTypeCodeInterpreter, BetaThreadNewParamsMessagesAttachmentsToolsTypeFileSearch:
		return true
	}
	return false
}

// A set of resources that are made available to the assistant's tools in this
// thread. The resources are specific to the type of tool. For example, the
// `code_interpreter` tool requires a list of file IDs, while the `file_search`
// tool requires a list of vector store IDs.
type BetaThreadNewParamsToolResources struct {
	CodeInterpreter param.Field[BetaThreadNewParamsToolResourcesCodeInterpreter] `json:"code_interpreter"`
	FileSearch      param.Field[BetaThreadNewParamsToolResourcesFileSearch]      `json:"file_search"`
}

func (r BetaThreadNewParamsToolResources) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

type BetaThreadNewParamsToolResourcesCodeInterpreter struct {
	// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
	// available to the `code_interpreter` tool. There can be a maximum of 20 files
	// associated with the tool.
	FileIDs param.Field[[]string] `json:"file_ids"`
}

func (r BetaThreadNewParamsToolResourcesCodeInterpreter) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

type BetaThreadNewParamsToolResourcesFileSearch struct {
	// The
	// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
	// attached to this thread. There can be a maximum of 1 vector store attached to
	// the thread.
	VectorStoreIDs param.Field[[]string] `json:"vector_store_ids"`
	// A helper to create a
	// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
	// with file_ids and attach it to this thread. There can be a maximum of 1 vector
	// store attached to the thread.
	VectorStores param.Field[[]BetaThreadNewParamsToolResourcesFileSearchVectorStore] `json:"vector_stores"`
}

func (r BetaThreadNewParamsToolResourcesFileSearch) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

type BetaThreadNewParamsToolResourcesFileSearchVectorStore struct {
	// The chunking strategy used to chunk the file(s). If not set, will use the `auto`
	// strategy.
	ChunkingStrategy param.Field[BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyUnion] `json:"chunking_strategy"`
	// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
	// add to the vector store. There can be a maximum of 10000 files in a vector
	// store.
	FileIDs param.Field[[]string] `json:"file_ids"`
	// Set of 16 key-value pairs that can be attached to a vector store. This can be
	// useful for storing additional information about the vector store in a structured
	// format. Keys can be a maximum of 64 characters long and values can be a maximum
	// of 512 characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
}

func (r BetaThreadNewParamsToolResourcesFileSearchVectorStore) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

// The chunking strategy used to chunk the file(s). If not set, will use the `auto`
// strategy.
type BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategy struct {
	// The strategy type: `auto` or `static` (see
	// [BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyType]).
	Type   param.Field[BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyType] `json:"type,required"`
	Static param.Field[interface{}]                                                                `json:"static,required"`
}

func (r BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategy) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

func (r BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategy) implementsBetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyUnion() {
}

// The chunking strategy used to chunk the file(s). If not set, will use the `auto`
// strategy.
//
// Satisfied by
// [BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAuto],
// [BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStatic],
// [BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategy].
type BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyUnion interface {
	implementsBetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyUnion()
}

// The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
// `800` and `chunk_overlap_tokens` of `400`.
type BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAuto struct {
	// Always `auto`.
	Type param.Field[BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAutoType] `json:"type,required"`
}

func (r BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAuto) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

func (r BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAuto) implementsBetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyUnion() {
}

// Always `auto`.
type BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAutoType string

const (
	BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAutoTypeAuto BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAutoType = "auto"
)

func (r BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAutoType) IsKnown() bool {
	switch r {
	case BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAutoTypeAuto:
		return true
	}
	return false
}

type BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStatic struct {
	Static param.Field[BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStaticStatic] `json:"static,required"`
	// Always `static`.
	Type param.Field[BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStaticType] `json:"type,required"`
}

func (r BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStatic) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

func (r BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStatic) implementsBetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyUnion() {
}

type BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStaticStatic struct {
	// The number of tokens that overlap between chunks. The default value is `400`.
	//
	// Note that the overlap must not exceed half of `max_chunk_size_tokens`.
	ChunkOverlapTokens param.Field[int64] `json:"chunk_overlap_tokens,required"`
	// The maximum number of tokens in each chunk. The default value is `800`. The
	// minimum value is `100` and the maximum value is `4096`.
	MaxChunkSizeTokens param.Field[int64] `json:"max_chunk_size_tokens,required"`
}

func (r BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStaticStatic) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

// Always `static`.
type BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStaticType string

const (
	BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStaticTypeStatic BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStaticType = "static"
)

func (r BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStaticType) IsKnown() bool {
	switch r {
	case BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyStaticTypeStatic:
		return true
	}
	return false
}

// The chunking strategy type: `auto` or `static`.
type BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyType string

const (
	BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyTypeAuto   BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyType = "auto"
	BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyTypeStatic BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyType = "static"
)

func (r BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyType) IsKnown() bool {
	switch r {
	case BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyTypeAuto, BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyTypeStatic:
		return true
	}
	return false
}

type BetaThreadUpdateParams struct {
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
	// A set of resources that are made available to the assistant's tools in this
	// thread. The resources are specific to the type of tool. For example, the
	// `code_interpreter` tool requires a list of file IDs, while the `file_search`
	// tool requires a list of vector store IDs.
	ToolResources param.Field[BetaThreadUpdateParamsToolResources] `json:"tool_resources"`
}

func (r BetaThreadUpdateParams) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

// A set of resources that are made available to the assistant's tools in this
// thread. The resources are specific to the type of tool. For example, the
// `code_interpreter` tool requires a list of file IDs, while the `file_search`
// tool requires a list of vector store IDs.
type BetaThreadUpdateParamsToolResources struct {
	CodeInterpreter param.Field[BetaThreadUpdateParamsToolResourcesCodeInterpreter] `json:"code_interpreter"`
	FileSearch      param.Field[BetaThreadUpdateParamsToolResourcesFileSearch]      `json:"file_search"`
}

func (r BetaThreadUpdateParamsToolResources) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

type BetaThreadUpdateParamsToolResourcesCodeInterpreter struct {
	// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
	// available to the `code_interpreter` tool. There can be a maximum of 20 files
	// associated with the tool.
	FileIDs param.Field[[]string] `json:"file_ids"`
}

func (r BetaThreadUpdateParamsToolResourcesCodeInterpreter) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

type BetaThreadUpdateParamsToolResourcesFileSearch struct {
	// The
	// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
	// attached to this thread. There can be a maximum of 1 vector store attached to
	// the thread.
+ VectorStoreIDs param.Field[[]string] `json:"vector_store_ids"` +} + +func (r BetaThreadUpdateParamsToolResourcesFileSearch) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaThreadNewAndRunParams struct { + // The ID of the + // [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + // execute this run. + AssistantID param.Field[string] `json:"assistant_id,required"` + // Override the default system message of the assistant. This is useful for + // modifying the behavior on a per-run basis. + Instructions param.Field[string] `json:"instructions"` + // The maximum number of completion tokens that may be used over the course of the + // run. The run will make a best effort to use only the number of completion tokens + // specified, across multiple turns of the run. If the run exceeds the number of + // completion tokens specified, the run will end with status `incomplete`. See + // `incomplete_details` for more info. + MaxCompletionTokens param.Field[int64] `json:"max_completion_tokens"` + // The maximum number of prompt tokens that may be used over the course of the run. + // The run will make a best effort to use only the number of prompt tokens + // specified, across multiple turns of the run. If the run exceeds the number of + // prompt tokens specified, the run will end with status `incomplete`. See + // `incomplete_details` for more info. + MaxPromptTokens param.Field[int64] `json:"max_prompt_tokens"` + // Set of 16 key-value pairs that can be attached to an object. This can be useful + // for storing additional information about the object in a structured format. Keys + // can be a maximum of 64 characters long and values can be a maximum of 512 + // characters long. + Metadata param.Field[interface{}] `json:"metadata"` + // The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + // be used to execute this run. 
If a value is provided here, it will override the + // model associated with the assistant. If not, the model associated with the + // assistant will be used. + Model param.Field[BetaThreadNewAndRunParamsModel] `json:"model"` + // Whether to enable + // [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + // during tool use. + ParallelToolCalls param.Field[bool] `json:"parallel_tool_calls"` + // Specifies the format that the model must output. Compatible with + // [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + // [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + // and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + // + // Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + // message the model generates is valid JSON. + // + // **Important:** when using JSON mode, you **must** also instruct the model to + // produce JSON yourself via a system or user message. Without this, the model may + // generate an unending stream of whitespace until the generation reaches the token + // limit, resulting in a long-running and seemingly "stuck" request. Also note that + // the message content may be partially cut off if `finish_reason="length"`, which + // indicates the generation exceeded `max_tokens` or the conversation exceeded the + // max context length. + ResponseFormat param.Field[AssistantResponseFormatOptionUnionParam] `json:"response_format"` + // What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + // make the output more random, while lower values like 0.2 will make it more + // focused and deterministic. + Temperature param.Field[float64] `json:"temperature"` + // If no thread is provided, an empty thread will be created. + Thread param.Field[BetaThreadNewAndRunParamsThread] `json:"thread"` + // Controls which (if any) tool is called by the model. 
`none` means the model will + // not call any tools and instead generates a message. `auto` is the default value + // and means the model can pick between generating a message or calling one or more + // tools. `required` means the model must call one or more tools before responding + // to the user. Specifying a particular tool like `{"type": "file_search"}` or + // `{"type": "function", "function": {"name": "my_function"}}` forces the model to + // call that tool. + ToolChoice param.Field[AssistantToolChoiceOptionUnionParam] `json:"tool_choice"` + // A set of resources that are used by the assistant's tools. The resources are + // specific to the type of tool. For example, the `code_interpreter` tool requires + // a list of file IDs, while the `file_search` tool requires a list of vector store + // IDs. + ToolResources param.Field[BetaThreadNewAndRunParamsToolResources] `json:"tool_resources"` + // Override the tools the assistant can use for this run. This is useful for + // modifying the behavior on a per-run basis. + Tools param.Field[[]BetaThreadNewAndRunParamsToolUnion] `json:"tools"` + // An alternative to sampling with temperature, called nucleus sampling, where the + // model considers the results of the tokens with top_p probability mass. So 0.1 + // means only the tokens comprising the top 10% probability mass are considered. + // + // We generally recommend altering this or temperature but not both. + TopP param.Field[float64] `json:"top_p"` + // Controls for how a thread will be truncated prior to the run. Use this to + // control the initial context window of the run. 
+ TruncationStrategy param.Field[BetaThreadNewAndRunParamsTruncationStrategy] `json:"truncation_strategy"` +} + +func (r BetaThreadNewAndRunParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaThreadNewAndRunParamsModel string + +const ( + BetaThreadNewAndRunParamsModelGPT4o BetaThreadNewAndRunParamsModel = "gpt-4o" + BetaThreadNewAndRunParamsModelGPT4o2024_05_13 BetaThreadNewAndRunParamsModel = "gpt-4o-2024-05-13" + BetaThreadNewAndRunParamsModelGPT4oMini BetaThreadNewAndRunParamsModel = "gpt-4o-mini" + BetaThreadNewAndRunParamsModelGPT4oMini2024_07_18 BetaThreadNewAndRunParamsModel = "gpt-4o-mini-2024-07-18" + BetaThreadNewAndRunParamsModelGPT4Turbo BetaThreadNewAndRunParamsModel = "gpt-4-turbo" + BetaThreadNewAndRunParamsModelGPT4Turbo2024_04_09 BetaThreadNewAndRunParamsModel = "gpt-4-turbo-2024-04-09" + BetaThreadNewAndRunParamsModelGPT4_0125Preview BetaThreadNewAndRunParamsModel = "gpt-4-0125-preview" + BetaThreadNewAndRunParamsModelGPT4TurboPreview BetaThreadNewAndRunParamsModel = "gpt-4-turbo-preview" + BetaThreadNewAndRunParamsModelGPT4_1106Preview BetaThreadNewAndRunParamsModel = "gpt-4-1106-preview" + BetaThreadNewAndRunParamsModelGPT4VisionPreview BetaThreadNewAndRunParamsModel = "gpt-4-vision-preview" + BetaThreadNewAndRunParamsModelGPT4 BetaThreadNewAndRunParamsModel = "gpt-4" + BetaThreadNewAndRunParamsModelGPT4_0314 BetaThreadNewAndRunParamsModel = "gpt-4-0314" + BetaThreadNewAndRunParamsModelGPT4_0613 BetaThreadNewAndRunParamsModel = "gpt-4-0613" + BetaThreadNewAndRunParamsModelGPT4_32k BetaThreadNewAndRunParamsModel = "gpt-4-32k" + BetaThreadNewAndRunParamsModelGPT4_32k0314 BetaThreadNewAndRunParamsModel = "gpt-4-32k-0314" + BetaThreadNewAndRunParamsModelGPT4_32k0613 BetaThreadNewAndRunParamsModel = "gpt-4-32k-0613" + BetaThreadNewAndRunParamsModelGPT3_5Turbo BetaThreadNewAndRunParamsModel = "gpt-3.5-turbo" + BetaThreadNewAndRunParamsModelGPT3_5Turbo16k BetaThreadNewAndRunParamsModel = "gpt-3.5-turbo-16k" 
+ BetaThreadNewAndRunParamsModelGPT3_5Turbo0613 BetaThreadNewAndRunParamsModel = "gpt-3.5-turbo-0613" + BetaThreadNewAndRunParamsModelGPT3_5Turbo1106 BetaThreadNewAndRunParamsModel = "gpt-3.5-turbo-1106" + BetaThreadNewAndRunParamsModelGPT3_5Turbo0125 BetaThreadNewAndRunParamsModel = "gpt-3.5-turbo-0125" + BetaThreadNewAndRunParamsModelGPT3_5Turbo16k0613 BetaThreadNewAndRunParamsModel = "gpt-3.5-turbo-16k-0613" +) + +func (r BetaThreadNewAndRunParamsModel) IsKnown() bool { + switch r { + case BetaThreadNewAndRunParamsModelGPT4o, BetaThreadNewAndRunParamsModelGPT4o2024_05_13, BetaThreadNewAndRunParamsModelGPT4oMini, BetaThreadNewAndRunParamsModelGPT4oMini2024_07_18, BetaThreadNewAndRunParamsModelGPT4Turbo, BetaThreadNewAndRunParamsModelGPT4Turbo2024_04_09, BetaThreadNewAndRunParamsModelGPT4_0125Preview, BetaThreadNewAndRunParamsModelGPT4TurboPreview, BetaThreadNewAndRunParamsModelGPT4_1106Preview, BetaThreadNewAndRunParamsModelGPT4VisionPreview, BetaThreadNewAndRunParamsModelGPT4, BetaThreadNewAndRunParamsModelGPT4_0314, BetaThreadNewAndRunParamsModelGPT4_0613, BetaThreadNewAndRunParamsModelGPT4_32k, BetaThreadNewAndRunParamsModelGPT4_32k0314, BetaThreadNewAndRunParamsModelGPT4_32k0613, BetaThreadNewAndRunParamsModelGPT3_5Turbo, BetaThreadNewAndRunParamsModelGPT3_5Turbo16k, BetaThreadNewAndRunParamsModelGPT3_5Turbo0613, BetaThreadNewAndRunParamsModelGPT3_5Turbo1106, BetaThreadNewAndRunParamsModelGPT3_5Turbo0125, BetaThreadNewAndRunParamsModelGPT3_5Turbo16k0613: + return true + } + return false +} + +// If no thread is provided, an empty thread will be created. +type BetaThreadNewAndRunParamsThread struct { + // A list of [messages](https://platform.openai.com/docs/api-reference/messages) to + // start the thread with. + Messages param.Field[[]BetaThreadNewAndRunParamsThreadMessage] `json:"messages"` + // Set of 16 key-value pairs that can be attached to an object. This can be useful + // for storing additional information about the object in a structured format. 
Keys + // can be a maximum of 64 characters long and values can be a maximum of 512 + // characters long. + Metadata param.Field[interface{}] `json:"metadata"` + // A set of resources that are made available to the assistant's tools in this + // thread. The resources are specific to the type of tool. For example, the + // `code_interpreter` tool requires a list of file IDs, while the `file_search` + // tool requires a list of vector store IDs. + ToolResources param.Field[BetaThreadNewAndRunParamsThreadToolResources] `json:"tool_resources"` +} + +func (r BetaThreadNewAndRunParamsThread) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaThreadNewAndRunParamsThreadMessage struct { + // The text contents of the message. + Content param.Field[BetaThreadNewAndRunParamsThreadMessagesContentUnion] `json:"content,required"` + // The role of the entity that is creating the message. Allowed values include: + // + // - `user`: Indicates the message is sent by an actual user and should be used in + // most cases to represent user-generated messages. + // - `assistant`: Indicates the message is generated by the assistant. Use this + // value to insert messages from the assistant into the conversation. + Role param.Field[BetaThreadNewAndRunParamsThreadMessagesRole] `json:"role,required"` + // A list of files attached to the message, and the tools they should be added to. + Attachments param.Field[[]BetaThreadNewAndRunParamsThreadMessagesAttachment] `json:"attachments"` + // Set of 16 key-value pairs that can be attached to an object. This can be useful + // for storing additional information about the object in a structured format. Keys + // can be a maximum of 64 characters long and values can be a maximum of 512 + // characters long. 
+ Metadata param.Field[interface{}] `json:"metadata"` +} + +func (r BetaThreadNewAndRunParamsThreadMessage) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// The text contents of the message. +// +// Satisfied by [shared.UnionString], +// [BetaThreadNewAndRunParamsThreadMessagesContentArrayOfContentParts]. +type BetaThreadNewAndRunParamsThreadMessagesContentUnion interface { + ImplementsBetaThreadNewAndRunParamsThreadMessagesContentUnion() +} + +type BetaThreadNewAndRunParamsThreadMessagesContentArrayOfContentParts []MessageContentPartParamUnion + +func (r BetaThreadNewAndRunParamsThreadMessagesContentArrayOfContentParts) ImplementsBetaThreadNewAndRunParamsThreadMessagesContentUnion() { +} + +// The role of the entity that is creating the message. Allowed values include: +// +// - `user`: Indicates the message is sent by an actual user and should be used in +// most cases to represent user-generated messages. +// - `assistant`: Indicates the message is generated by the assistant. Use this +// value to insert messages from the assistant into the conversation. +type BetaThreadNewAndRunParamsThreadMessagesRole string + +const ( + BetaThreadNewAndRunParamsThreadMessagesRoleUser BetaThreadNewAndRunParamsThreadMessagesRole = "user" + BetaThreadNewAndRunParamsThreadMessagesRoleAssistant BetaThreadNewAndRunParamsThreadMessagesRole = "assistant" +) + +func (r BetaThreadNewAndRunParamsThreadMessagesRole) IsKnown() bool { + switch r { + case BetaThreadNewAndRunParamsThreadMessagesRoleUser, BetaThreadNewAndRunParamsThreadMessagesRoleAssistant: + return true + } + return false +} + +type BetaThreadNewAndRunParamsThreadMessagesAttachment struct { + // The ID of the file to attach to the message. + FileID param.Field[string] `json:"file_id"` + // The tools to add this file to. 
+ Tools param.Field[[]BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolUnion] `json:"tools"` +} + +func (r BetaThreadNewAndRunParamsThreadMessagesAttachment) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaThreadNewAndRunParamsThreadMessagesAttachmentsTool struct { + // The type of tool being defined: `code_interpreter` + Type param.Field[BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsType] `json:"type,required"` +} + +func (r BetaThreadNewAndRunParamsThreadMessagesAttachmentsTool) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r BetaThreadNewAndRunParamsThreadMessagesAttachmentsTool) implementsBetaThreadNewAndRunParamsThreadMessagesAttachmentsToolUnion() { +} + +// Satisfied by [CodeInterpreterToolParam], +// [BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsFileSearch], +// [BetaThreadNewAndRunParamsThreadMessagesAttachmentsTool]. +type BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolUnion interface { + implementsBetaThreadNewAndRunParamsThreadMessagesAttachmentsToolUnion() +} + +type BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsFileSearch struct { + // The type of tool being defined: `file_search` + Type param.Field[BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsFileSearchType] `json:"type,required"` +} + +func (r BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsFileSearch) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsFileSearch) implementsBetaThreadNewAndRunParamsThreadMessagesAttachmentsToolUnion() { +} + +// The type of tool being defined: `file_search` +type BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsFileSearchType string + +const ( + BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsFileSearchTypeFileSearch BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsFileSearchType = "file_search" +) + +func (r 
BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsFileSearchType) IsKnown() bool { + switch r { + case BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsFileSearchTypeFileSearch: + return true + } + return false +} + +// The type of tool being defined: `code_interpreter` +type BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsType string + +const ( + BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsTypeCodeInterpreter BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsType = "code_interpreter" + BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsTypeFileSearch BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsType = "file_search" +) + +func (r BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsType) IsKnown() bool { + switch r { + case BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsTypeCodeInterpreter, BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsTypeFileSearch: + return true + } + return false +} + +// A set of resources that are made available to the assistant's tools in this +// thread. The resources are specific to the type of tool. For example, the +// `code_interpreter` tool requires a list of file IDs, while the `file_search` +// tool requires a list of vector store IDs. +type BetaThreadNewAndRunParamsThreadToolResources struct { + CodeInterpreter param.Field[BetaThreadNewAndRunParamsThreadToolResourcesCodeInterpreter] `json:"code_interpreter"` + FileSearch param.Field[BetaThreadNewAndRunParamsThreadToolResourcesFileSearch] `json:"file_search"` +} + +func (r BetaThreadNewAndRunParamsThreadToolResources) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaThreadNewAndRunParamsThreadToolResourcesCodeInterpreter struct { + // A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + // available to the `code_interpreter` tool. There can be a maximum of 20 files + // associated with the tool. 
+ FileIDs param.Field[[]string] `json:"file_ids"` +} + +func (r BetaThreadNewAndRunParamsThreadToolResourcesCodeInterpreter) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaThreadNewAndRunParamsThreadToolResourcesFileSearch struct { + // The + // [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + // attached to this thread. There can be a maximum of 1 vector store attached to + // the thread. + VectorStoreIDs param.Field[[]string] `json:"vector_store_ids"` + // A helper to create a + // [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + // with file_ids and attach it to this thread. There can be a maximum of 1 vector + // store attached to the thread. + VectorStores param.Field[[]BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStore] `json:"vector_stores"` +} + +func (r BetaThreadNewAndRunParamsThreadToolResourcesFileSearch) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStore struct { + // The chunking strategy used to chunk the file(s). If not set, will use the `auto` + // strategy. + ChunkingStrategy param.Field[BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyUnion] `json:"chunking_strategy"` + // A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + // add to the vector store. There can be a maximum of 10000 files in a vector + // store. + FileIDs param.Field[[]string] `json:"file_ids"` + // Set of 16 key-value pairs that can be attached to a vector store. This can be + // useful for storing additional information about the vector store in a structured + // format. Keys can be a maximum of 64 characters long and values can be a maximum + // of 512 characters long. 
+ Metadata param.Field[interface{}] `json:"metadata"` +} + +func (r BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStore) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// The chunking strategy used to chunk the file(s). If not set, will use the `auto` +// strategy. +type BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategy struct { + // Always `auto`. + Type param.Field[BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyType] `json:"type,required"` + Static param.Field[interface{}] `json:"static,required"` +} + +func (r BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategy) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategy) implementsBetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyUnion() { +} + +// The chunking strategy used to chunk the file(s). If not set, will use the `auto` +// strategy. +// +// Satisfied by +// [BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyAuto], +// [BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyStatic], +// [BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategy]. +type BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyUnion interface { + implementsBetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyUnion() +} + +// The default strategy. This strategy currently uses a `max_chunk_size_tokens` of +// `800` and `chunk_overlap_tokens` of `400`. +type BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyAuto struct { + // Always `auto`. 
+ Type param.Field[BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyAutoType] `json:"type,required"` +} + +func (r BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyAuto) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyAuto) implementsBetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyUnion() { +} + +// Always `auto`. +type BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyAutoType string + +const ( + BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyAutoTypeAuto BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyAutoType = "auto" +) + +func (r BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyAutoType) IsKnown() bool { + switch r { + case BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyAutoTypeAuto: + return true + } + return false +} + +type BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyStatic struct { + Static param.Field[BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyStaticStatic] `json:"static,required"` + // Always `static`. 
+ Type param.Field[BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyStaticType] `json:"type,required"` +} + +func (r BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyStatic) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyStatic) implementsBetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyUnion() { +} + +type BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyStaticStatic struct { + // The number of tokens that overlap between chunks. The default value is `400`. + // + // Note that the overlap must not exceed half of `max_chunk_size_tokens`. + ChunkOverlapTokens param.Field[int64] `json:"chunk_overlap_tokens,required"` + // The maximum number of tokens in each chunk. The default value is `800`. The + // minimum value is `100` and the maximum value is `4096`. + MaxChunkSizeTokens param.Field[int64] `json:"max_chunk_size_tokens,required"` +} + +func (r BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyStaticStatic) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// Always `static`. +type BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyStaticType string + +const ( + BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyStaticTypeStatic BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyStaticType = "static" +) + +func (r BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyStaticType) IsKnown() bool { + switch r { + case BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyStaticTypeStatic: + return true + } + return false +} + +// Always `auto`. 
+type BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyType string + +const ( + BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyTypeAuto BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyType = "auto" + BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyTypeStatic BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyType = "static" +) + +func (r BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyType) IsKnown() bool { + switch r { + case BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyTypeAuto, BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyTypeStatic: + return true + } + return false +} + +// A set of resources that are used by the assistant's tools. The resources are +// specific to the type of tool. For example, the `code_interpreter` tool requires +// a list of file IDs, while the `file_search` tool requires a list of vector store +// IDs. +type BetaThreadNewAndRunParamsToolResources struct { + CodeInterpreter param.Field[BetaThreadNewAndRunParamsToolResourcesCodeInterpreter] `json:"code_interpreter"` + FileSearch param.Field[BetaThreadNewAndRunParamsToolResourcesFileSearch] `json:"file_search"` +} + +func (r BetaThreadNewAndRunParamsToolResources) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaThreadNewAndRunParamsToolResourcesCodeInterpreter struct { + // A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + // available to the `code_interpreter` tool. There can be a maximum of 20 files + // associated with the tool. 
+ FileIDs param.Field[[]string] `json:"file_ids"` +} + +func (r BetaThreadNewAndRunParamsToolResourcesCodeInterpreter) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaThreadNewAndRunParamsToolResourcesFileSearch struct { + // The ID of the + // [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + // attached to this assistant. There can be a maximum of 1 vector store attached to + // the assistant. + VectorStoreIDs param.Field[[]string] `json:"vector_store_ids"` +} + +func (r BetaThreadNewAndRunParamsToolResourcesFileSearch) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaThreadNewAndRunParamsTool struct { + // The type of tool being defined: `code_interpreter` + Type param.Field[BetaThreadNewAndRunParamsToolsType] `json:"type,required"` + FileSearch param.Field[interface{}] `json:"file_search,required"` + Function param.Field[shared.FunctionDefinitionParam] `json:"function"` +} + +func (r BetaThreadNewAndRunParamsTool) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r BetaThreadNewAndRunParamsTool) implementsBetaThreadNewAndRunParamsToolUnion() {} + +// Satisfied by [CodeInterpreterToolParam], [FileSearchToolParam], +// [FunctionToolParam], [BetaThreadNewAndRunParamsTool]. 
+type BetaThreadNewAndRunParamsToolUnion interface { + implementsBetaThreadNewAndRunParamsToolUnion() +} + +// The type of tool being defined: `code_interpreter` +type BetaThreadNewAndRunParamsToolsType string + +const ( + BetaThreadNewAndRunParamsToolsTypeCodeInterpreter BetaThreadNewAndRunParamsToolsType = "code_interpreter" + BetaThreadNewAndRunParamsToolsTypeFileSearch BetaThreadNewAndRunParamsToolsType = "file_search" + BetaThreadNewAndRunParamsToolsTypeFunction BetaThreadNewAndRunParamsToolsType = "function" +) + +func (r BetaThreadNewAndRunParamsToolsType) IsKnown() bool { + switch r { + case BetaThreadNewAndRunParamsToolsTypeCodeInterpreter, BetaThreadNewAndRunParamsToolsTypeFileSearch, BetaThreadNewAndRunParamsToolsTypeFunction: + return true + } + return false +} + +// Controls for how a thread will be truncated prior to the run. Use this to +// control the initial context window of the run. +type BetaThreadNewAndRunParamsTruncationStrategy struct { + // The truncation strategy to use for the thread. The default is `auto`. If set to + // `last_messages`, the thread will be truncated to the n most recent messages in + // the thread. When set to `auto`, messages in the middle of the thread will be + // dropped to fit the context length of the model, `max_prompt_tokens`. + Type param.Field[BetaThreadNewAndRunParamsTruncationStrategyType] `json:"type,required"` + // The number of most recent messages from the thread when constructing the context + // for the run. + LastMessages param.Field[int64] `json:"last_messages"` +} + +func (r BetaThreadNewAndRunParamsTruncationStrategy) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// The truncation strategy to use for the thread. The default is `auto`. If set to +// `last_messages`, the thread will be truncated to the n most recent messages in +// the thread. 
When set to `auto`, messages in the middle of the thread will be +// dropped to fit the context length of the model, `max_prompt_tokens`. +type BetaThreadNewAndRunParamsTruncationStrategyType string + +const ( + BetaThreadNewAndRunParamsTruncationStrategyTypeAuto BetaThreadNewAndRunParamsTruncationStrategyType = "auto" + BetaThreadNewAndRunParamsTruncationStrategyTypeLastMessages BetaThreadNewAndRunParamsTruncationStrategyType = "last_messages" +) + +func (r BetaThreadNewAndRunParamsTruncationStrategyType) IsKnown() bool { + switch r { + case BetaThreadNewAndRunParamsTruncationStrategyTypeAuto, BetaThreadNewAndRunParamsTruncationStrategyTypeLastMessages: + return true + } + return false +} diff --git a/betathread_test.go b/betathread_test.go new file mode 100644 index 0000000..e1acb0e --- /dev/null +++ b/betathread_test.go @@ -0,0 +1,399 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" + "github.com/openai/openai-go/shared" +) + +func TestBetaThreadNewWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Threads.New(context.TODO(), openai.BetaThreadNewParams{ + Messages: openai.F([]openai.BetaThreadNewParamsMessage{{ + Role: openai.F(openai.BetaThreadNewParamsMessagesRoleUser), + Content: openai.F[openai.BetaThreadNewParamsMessagesContentUnion](shared.UnionString("string")), + Attachments: openai.F([]openai.BetaThreadNewParamsMessagesAttachment{{ + FileID: openai.F("file_id"), + Tools: 
openai.F([]openai.BetaThreadNewParamsMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }, { + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadNewParamsMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }, { + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadNewParamsMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }}), + Metadata: openai.F[any](map[string]interface{}{}), + }, { + Role: openai.F(openai.BetaThreadNewParamsMessagesRoleUser), + Content: openai.F[openai.BetaThreadNewParamsMessagesContentUnion](shared.UnionString("string")), + Attachments: openai.F([]openai.BetaThreadNewParamsMessagesAttachment{{ + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadNewParamsMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }, { + FileID: openai.F("file_id"), + Tools: 
openai.F([]openai.BetaThreadNewParamsMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }, { + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadNewParamsMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }}), + Metadata: openai.F[any](map[string]interface{}{}), + }, { + Role: openai.F(openai.BetaThreadNewParamsMessagesRoleUser), + Content: openai.F[openai.BetaThreadNewParamsMessagesContentUnion](shared.UnionString("string")), + Attachments: openai.F([]openai.BetaThreadNewParamsMessagesAttachment{{ + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadNewParamsMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }, { + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadNewParamsMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }, { + FileID: openai.F("file_id"), + Tools: 
openai.F([]openai.BetaThreadNewParamsMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }}), + Metadata: openai.F[any](map[string]interface{}{}), + }}), + Metadata: openai.F[any](map[string]interface{}{}), + ToolResources: openai.F(openai.BetaThreadNewParamsToolResources{ + CodeInterpreter: openai.F(openai.BetaThreadNewParamsToolResourcesCodeInterpreter{ + FileIDs: openai.F([]string{"string", "string", "string"}), + }), + FileSearch: openai.F(openai.BetaThreadNewParamsToolResourcesFileSearch{ + VectorStoreIDs: openai.F([]string{"string"}), + VectorStores: openai.F([]openai.BetaThreadNewParamsToolResourcesFileSearchVectorStore{{ + FileIDs: openai.F([]string{"string", "string", "string"}), + ChunkingStrategy: openai.F[openai.BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyUnion](openai.BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAuto{ + Type: openai.F(openai.BetaThreadNewParamsToolResourcesFileSearchVectorStoresChunkingStrategyAutoTypeAuto), + }), + Metadata: openai.F[any](map[string]interface{}{}), + }}), + }), + }), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaThreadGet(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Threads.Get(context.TODO(), "thread_id") + if err != nil { + var apierr *openai.Error + if 
errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaThreadUpdateWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Threads.Update( + context.TODO(), + "thread_id", + openai.BetaThreadUpdateParams{ + Metadata: openai.F[any](map[string]interface{}{}), + ToolResources: openai.F(openai.BetaThreadUpdateParamsToolResources{ + CodeInterpreter: openai.F(openai.BetaThreadUpdateParamsToolResourcesCodeInterpreter{ + FileIDs: openai.F([]string{"string", "string", "string"}), + }), + FileSearch: openai.F(openai.BetaThreadUpdateParamsToolResourcesFileSearch{ + VectorStoreIDs: openai.F([]string{"string"}), + }), + }), + }, + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaThreadDelete(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Threads.Delete(context.TODO(), "thread_id") + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaThreadNewAndRunWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := 
openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Threads.NewAndRun(context.TODO(), openai.BetaThreadNewAndRunParams{ + AssistantID: openai.F("assistant_id"), + Instructions: openai.F("instructions"), + MaxCompletionTokens: openai.F(int64(256)), + MaxPromptTokens: openai.F(int64(256)), + Metadata: openai.F[any](map[string]interface{}{}), + Model: openai.F(openai.BetaThreadNewAndRunParamsModelGPT4o), + ParallelToolCalls: openai.F(true), + ResponseFormat: openai.F[openai.AssistantResponseFormatOptionUnionParam](openai.AssistantResponseFormatOptionString(openai.AssistantResponseFormatOptionStringNone)), + Temperature: openai.F(1.000000), + Thread: openai.F(openai.BetaThreadNewAndRunParamsThread{ + Messages: openai.F([]openai.BetaThreadNewAndRunParamsThreadMessage{{ + Role: openai.F(openai.BetaThreadNewAndRunParamsThreadMessagesRoleUser), + Content: openai.F[openai.BetaThreadNewAndRunParamsThreadMessagesContentUnion](shared.UnionString("string")), + Attachments: openai.F([]openai.BetaThreadNewAndRunParamsThreadMessagesAttachment{{ + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }, { + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }, { + FileID: 
openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }}), + Metadata: openai.F[any](map[string]interface{}{}), + }, { + Role: openai.F(openai.BetaThreadNewAndRunParamsThreadMessagesRoleUser), + Content: openai.F[openai.BetaThreadNewAndRunParamsThreadMessagesContentUnion](shared.UnionString("string")), + Attachments: openai.F([]openai.BetaThreadNewAndRunParamsThreadMessagesAttachment{{ + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }, { + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }, { + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: 
openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }}), + Metadata: openai.F[any](map[string]interface{}{}), + }, { + Role: openai.F(openai.BetaThreadNewAndRunParamsThreadMessagesRoleUser), + Content: openai.F[openai.BetaThreadNewAndRunParamsThreadMessagesContentUnion](shared.UnionString("string")), + Attachments: openai.F([]openai.BetaThreadNewAndRunParamsThreadMessagesAttachment{{ + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }, { + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }, { + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }}), + Metadata: openai.F[any](map[string]interface{}{}), + }}), + ToolResources: openai.F(openai.BetaThreadNewAndRunParamsThreadToolResources{ + CodeInterpreter: openai.F(openai.BetaThreadNewAndRunParamsThreadToolResourcesCodeInterpreter{ + FileIDs: openai.F([]string{"string", "string", 
"string"}), + }), + FileSearch: openai.F(openai.BetaThreadNewAndRunParamsThreadToolResourcesFileSearch{ + VectorStoreIDs: openai.F([]string{"string"}), + VectorStores: openai.F([]openai.BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStore{{ + FileIDs: openai.F([]string{"string", "string", "string"}), + ChunkingStrategy: openai.F[openai.BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyUnion](openai.BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyAuto{ + Type: openai.F(openai.BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStoresChunkingStrategyAutoTypeAuto), + }), + Metadata: openai.F[any](map[string]interface{}{}), + }}), + }), + }), + Metadata: openai.F[any](map[string]interface{}{}), + }), + ToolChoice: openai.F[openai.AssistantToolChoiceOptionUnionParam](openai.AssistantToolChoiceOptionString(openai.AssistantToolChoiceOptionStringNone)), + ToolResources: openai.F(openai.BetaThreadNewAndRunParamsToolResources{ + CodeInterpreter: openai.F(openai.BetaThreadNewAndRunParamsToolResourcesCodeInterpreter{ + FileIDs: openai.F([]string{"string", "string", "string"}), + }), + FileSearch: openai.F(openai.BetaThreadNewAndRunParamsToolResourcesFileSearch{ + VectorStoreIDs: openai.F([]string{"string"}), + }), + }), + Tools: openai.F([]openai.BetaThreadNewAndRunParamsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + TopP: openai.F(1.000000), + TruncationStrategy: openai.F(openai.BetaThreadNewAndRunParamsTruncationStrategy{ + Type: openai.F(openai.BetaThreadNewAndRunParamsTruncationStrategyTypeAuto), + LastMessages: openai.F(int64(1)), + }), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { 
+ t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/betathreadmessage.go b/betathreadmessage.go new file mode 100644 index 0000000..e093d8e --- /dev/null +++ b/betathreadmessage.go @@ -0,0 +1,2031 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "reflect" + + "github.com/openai/openai-go/internal/apijson" + "github.com/openai/openai-go/internal/apiquery" + "github.com/openai/openai-go/internal/pagination" + "github.com/openai/openai-go/internal/param" + "github.com/openai/openai-go/internal/requestconfig" + "github.com/openai/openai-go/option" + "github.com/tidwall/gjson" +) + +// BetaThreadMessageService contains methods and other services that help with +// interacting with the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewBetaThreadMessageService] method instead. +type BetaThreadMessageService struct { + Options []option.RequestOption +} + +// NewBetaThreadMessageService generates a new service that applies the given +// options to each request. These options are applied after the parent client's +// options (if there is one), and before any request-specific options. +func NewBetaThreadMessageService(opts ...option.RequestOption) (r *BetaThreadMessageService) { + r = &BetaThreadMessageService{} + r.Options = opts + return +} + +// Create a message. +func (r *BetaThreadMessageService) New(ctx context.Context, threadID string, body BetaThreadMessageNewParams, opts ...option.RequestOption) (res *Message, err error) { + opts = append(r.Options[:], opts...) 
+ if threadID == "" { + err = errors.New("missing required thread_id parameter") + return + } + path := fmt.Sprintf("threads/%s/messages", threadID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Retrieve a message. +func (r *BetaThreadMessageService) Get(ctx context.Context, threadID string, messageID string, opts ...option.RequestOption) (res *Message, err error) { + opts = append(r.Options[:], opts...) + if threadID == "" { + err = errors.New("missing required thread_id parameter") + return + } + if messageID == "" { + err = errors.New("missing required message_id parameter") + return + } + path := fmt.Sprintf("threads/%s/messages/%s", threadID, messageID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +// Modifies a message. +func (r *BetaThreadMessageService) Update(ctx context.Context, threadID string, messageID string, body BetaThreadMessageUpdateParams, opts ...option.RequestOption) (res *Message, err error) { + opts = append(r.Options[:], opts...) + if threadID == "" { + err = errors.New("missing required thread_id parameter") + return + } + if messageID == "" { + err = errors.New("missing required message_id parameter") + return + } + path := fmt.Sprintf("threads/%s/messages/%s", threadID, messageID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Returns a list of messages for a given thread. +func (r *BetaThreadMessageService) List(ctx context.Context, threadID string, query BetaThreadMessageListParams, opts ...option.RequestOption) (res *pagination.CursorPage[Message], err error) { + var raw *http.Response + opts = append(r.Options[:], opts...) + opts = append([]option.RequestOption{option.WithResponseInto(&raw)}, opts...) 
+ if threadID == "" { + err = errors.New("missing required thread_id parameter") + return + } + path := fmt.Sprintf("threads/%s/messages", threadID) + cfg, err := requestconfig.NewRequestConfig(ctx, http.MethodGet, path, query, &res, opts...) + if err != nil { + return nil, err + } + err = cfg.Execute() + if err != nil { + return nil, err + } + res.SetPageConfig(cfg, raw) + return res, nil +} + +// Returns a list of messages for a given thread. +func (r *BetaThreadMessageService) ListAutoPaging(ctx context.Context, threadID string, query BetaThreadMessageListParams, opts ...option.RequestOption) *pagination.CursorPageAutoPager[Message] { + return pagination.NewCursorPageAutoPager(r.List(ctx, threadID, query, opts...)) +} + +// Deletes a message. +func (r *BetaThreadMessageService) Delete(ctx context.Context, threadID string, messageID string, opts ...option.RequestOption) (res *MessageDeleted, err error) { + opts = append(r.Options[:], opts...) + if threadID == "" { + err = errors.New("missing required thread_id parameter") + return + } + if messageID == "" { + err = errors.New("missing required message_id parameter") + return + } + path := fmt.Sprintf("threads/%s/messages/%s", threadID, messageID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodDelete, path, nil, &res, opts...) + return +} + +// A citation within the message that points to a specific quote from a specific +// File associated with the assistant or the message. Generated when the assistant +// uses the "file_search" tool to search files. +type Annotation struct { + // Always `file_citation`. + Type AnnotationType `json:"type,required"` + // The text in the message content that needs to be replaced. + Text string `json:"text,required"` + // This field can have the runtime type of [FileCitationAnnotationFileCitation]. 
+ FileCitation interface{} `json:"file_citation,required"` + StartIndex int64 `json:"start_index,required"` + EndIndex int64 `json:"end_index,required"` + // This field can have the runtime type of [FilePathAnnotationFilePath]. + FilePath interface{} `json:"file_path,required"` + JSON annotationJSON `json:"-"` + union AnnotationUnion +} + +// annotationJSON contains the JSON metadata for the struct [Annotation] +type annotationJSON struct { + Type apijson.Field + Text apijson.Field + FileCitation apijson.Field + StartIndex apijson.Field + EndIndex apijson.Field + FilePath apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r annotationJSON) RawJSON() string { + return r.raw +} + +func (r *Annotation) UnmarshalJSON(data []byte) (err error) { + *r = Annotation{} + err = apijson.UnmarshalRoot(data, &r.union) + if err != nil { + return err + } + return apijson.Port(r.union, &r) +} + +// AsUnion returns a [AnnotationUnion] interface which you can cast to the specific +// types for more type safety. +// +// Possible runtime types of the union are [FileCitationAnnotation], +// [FilePathAnnotation]. +func (r Annotation) AsUnion() AnnotationUnion { + return r.union +} + +// A citation within the message that points to a specific quote from a specific +// File associated with the assistant or the message. Generated when the assistant +// uses the "file_search" tool to search files. +// +// Union satisfied by [FileCitationAnnotation] or [FilePathAnnotation]. +type AnnotationUnion interface { + implementsAnnotation() +} + +func init() { + apijson.RegisterUnion( + reflect.TypeOf((*AnnotationUnion)(nil)).Elem(), + "type", + apijson.UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(FileCitationAnnotation{}), + DiscriminatorValue: "file_citation", + }, + apijson.UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(FilePathAnnotation{}), + DiscriminatorValue: "file_path", + }, + ) +} + +// Always `file_citation`. 
+type AnnotationType string + +const ( + AnnotationTypeFileCitation AnnotationType = "file_citation" + AnnotationTypeFilePath AnnotationType = "file_path" +) + +func (r AnnotationType) IsKnown() bool { + switch r { + case AnnotationTypeFileCitation, AnnotationTypeFilePath: + return true + } + return false +} + +// A citation within the message that points to a specific quote from a specific +// File associated with the assistant or the message. Generated when the assistant +// uses the "file_search" tool to search files. +type AnnotationDelta struct { + // The index of the annotation in the text content part. + Index int64 `json:"index,required"` + // Always `file_citation`. + Type AnnotationDeltaType `json:"type,required"` + // The text in the message content that needs to be replaced. + Text string `json:"text"` + // This field can have the runtime type of + // [FileCitationDeltaAnnotationFileCitation]. + FileCitation interface{} `json:"file_citation,required"` + StartIndex int64 `json:"start_index"` + EndIndex int64 `json:"end_index"` + // This field can have the runtime type of [FilePathDeltaAnnotationFilePath]. 
+ FilePath interface{} `json:"file_path,required"` + JSON annotationDeltaJSON `json:"-"` + union AnnotationDeltaUnion +} + +// annotationDeltaJSON contains the JSON metadata for the struct [AnnotationDelta] +type annotationDeltaJSON struct { + Index apijson.Field + Type apijson.Field + Text apijson.Field + FileCitation apijson.Field + StartIndex apijson.Field + EndIndex apijson.Field + FilePath apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r annotationDeltaJSON) RawJSON() string { + return r.raw +} + +func (r *AnnotationDelta) UnmarshalJSON(data []byte) (err error) { + *r = AnnotationDelta{} + err = apijson.UnmarshalRoot(data, &r.union) + if err != nil { + return err + } + return apijson.Port(r.union, &r) +} + +// AsUnion returns a [AnnotationDeltaUnion] interface which you can cast to the +// specific types for more type safety. +// +// Possible runtime types of the union are [FileCitationDeltaAnnotation], +// [FilePathDeltaAnnotation]. +func (r AnnotationDelta) AsUnion() AnnotationDeltaUnion { + return r.union +} + +// A citation within the message that points to a specific quote from a specific +// File associated with the assistant or the message. Generated when the assistant +// uses the "file_search" tool to search files. +// +// Union satisfied by [FileCitationDeltaAnnotation] or [FilePathDeltaAnnotation]. +type AnnotationDeltaUnion interface { + implementsAnnotationDelta() +} + +func init() { + apijson.RegisterUnion( + reflect.TypeOf((*AnnotationDeltaUnion)(nil)).Elem(), + "type", + apijson.UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(FileCitationDeltaAnnotation{}), + DiscriminatorValue: "file_citation", + }, + apijson.UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(FilePathDeltaAnnotation{}), + DiscriminatorValue: "file_path", + }, + ) +} + +// Always `file_citation`. 
+type AnnotationDeltaType string + +const ( + AnnotationDeltaTypeFileCitation AnnotationDeltaType = "file_citation" + AnnotationDeltaTypeFilePath AnnotationDeltaType = "file_path" +) + +func (r AnnotationDeltaType) IsKnown() bool { + switch r { + case AnnotationDeltaTypeFileCitation, AnnotationDeltaTypeFilePath: + return true + } + return false +} + +// A citation within the message that points to a specific quote from a specific +// File associated with the assistant or the message. Generated when the assistant +// uses the "file_search" tool to search files. +type FileCitationAnnotation struct { + EndIndex int64 `json:"end_index,required"` + FileCitation FileCitationAnnotationFileCitation `json:"file_citation,required"` + StartIndex int64 `json:"start_index,required"` + // The text in the message content that needs to be replaced. + Text string `json:"text,required"` + // Always `file_citation`. + Type FileCitationAnnotationType `json:"type,required"` + JSON fileCitationAnnotationJSON `json:"-"` +} + +// fileCitationAnnotationJSON contains the JSON metadata for the struct +// [FileCitationAnnotation] +type fileCitationAnnotationJSON struct { + EndIndex apijson.Field + FileCitation apijson.Field + StartIndex apijson.Field + Text apijson.Field + Type apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FileCitationAnnotation) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r fileCitationAnnotationJSON) RawJSON() string { + return r.raw +} + +func (r FileCitationAnnotation) implementsAnnotation() {} + +type FileCitationAnnotationFileCitation struct { + // The ID of the specific File the citation is from. 
+ FileID string `json:"file_id,required"` + JSON fileCitationAnnotationFileCitationJSON `json:"-"` +} + +// fileCitationAnnotationFileCitationJSON contains the JSON metadata for the struct +// [FileCitationAnnotationFileCitation] +type fileCitationAnnotationFileCitationJSON struct { + FileID apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FileCitationAnnotationFileCitation) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r fileCitationAnnotationFileCitationJSON) RawJSON() string { + return r.raw +} + +// Always `file_citation`. +type FileCitationAnnotationType string + +const ( + FileCitationAnnotationTypeFileCitation FileCitationAnnotationType = "file_citation" +) + +func (r FileCitationAnnotationType) IsKnown() bool { + switch r { + case FileCitationAnnotationTypeFileCitation: + return true + } + return false +} + +// A citation within the message that points to a specific quote from a specific +// File associated with the assistant or the message. Generated when the assistant +// uses the "file_search" tool to search files. +type FileCitationDeltaAnnotation struct { + // The index of the annotation in the text content part. + Index int64 `json:"index,required"` + // Always `file_citation`. + Type FileCitationDeltaAnnotationType `json:"type,required"` + EndIndex int64 `json:"end_index"` + FileCitation FileCitationDeltaAnnotationFileCitation `json:"file_citation"` + StartIndex int64 `json:"start_index"` + // The text in the message content that needs to be replaced. 
+ Text string `json:"text"` + JSON fileCitationDeltaAnnotationJSON `json:"-"` +} + +// fileCitationDeltaAnnotationJSON contains the JSON metadata for the struct +// [FileCitationDeltaAnnotation] +type fileCitationDeltaAnnotationJSON struct { + Index apijson.Field + Type apijson.Field + EndIndex apijson.Field + FileCitation apijson.Field + StartIndex apijson.Field + Text apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FileCitationDeltaAnnotation) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r fileCitationDeltaAnnotationJSON) RawJSON() string { + return r.raw +} + +func (r FileCitationDeltaAnnotation) implementsAnnotationDelta() {} + +// Always `file_citation`. +type FileCitationDeltaAnnotationType string + +const ( + FileCitationDeltaAnnotationTypeFileCitation FileCitationDeltaAnnotationType = "file_citation" +) + +func (r FileCitationDeltaAnnotationType) IsKnown() bool { + switch r { + case FileCitationDeltaAnnotationTypeFileCitation: + return true + } + return false +} + +type FileCitationDeltaAnnotationFileCitation struct { + // The ID of the specific File the citation is from. + FileID string `json:"file_id"` + // The specific quote in the file. + Quote string `json:"quote"` + JSON fileCitationDeltaAnnotationFileCitationJSON `json:"-"` +} + +// fileCitationDeltaAnnotationFileCitationJSON contains the JSON metadata for the +// struct [FileCitationDeltaAnnotationFileCitation] +type fileCitationDeltaAnnotationFileCitationJSON struct { + FileID apijson.Field + Quote apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FileCitationDeltaAnnotationFileCitation) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r fileCitationDeltaAnnotationFileCitationJSON) RawJSON() string { + return r.raw +} + +// A URL for the file that's generated when the assistant used the +// `code_interpreter` tool to generate a file. 
+type FilePathAnnotation struct { + EndIndex int64 `json:"end_index,required"` + FilePath FilePathAnnotationFilePath `json:"file_path,required"` + StartIndex int64 `json:"start_index,required"` + // The text in the message content that needs to be replaced. + Text string `json:"text,required"` + // Always `file_path`. + Type FilePathAnnotationType `json:"type,required"` + JSON filePathAnnotationJSON `json:"-"` +} + +// filePathAnnotationJSON contains the JSON metadata for the struct +// [FilePathAnnotation] +type filePathAnnotationJSON struct { + EndIndex apijson.Field + FilePath apijson.Field + StartIndex apijson.Field + Text apijson.Field + Type apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FilePathAnnotation) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r filePathAnnotationJSON) RawJSON() string { + return r.raw +} + +func (r FilePathAnnotation) implementsAnnotation() {} + +type FilePathAnnotationFilePath struct { + // The ID of the file that was generated. + FileID string `json:"file_id,required"` + JSON filePathAnnotationFilePathJSON `json:"-"` +} + +// filePathAnnotationFilePathJSON contains the JSON metadata for the struct +// [FilePathAnnotationFilePath] +type filePathAnnotationFilePathJSON struct { + FileID apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FilePathAnnotationFilePath) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r filePathAnnotationFilePathJSON) RawJSON() string { + return r.raw +} + +// Always `file_path`. +type FilePathAnnotationType string + +const ( + FilePathAnnotationTypeFilePath FilePathAnnotationType = "file_path" +) + +func (r FilePathAnnotationType) IsKnown() bool { + switch r { + case FilePathAnnotationTypeFilePath: + return true + } + return false +} + +// A URL for the file that's generated when the assistant used the +// `code_interpreter` tool to generate a file. 
+type FilePathDeltaAnnotation struct { + // The index of the annotation in the text content part. + Index int64 `json:"index,required"` + // Always `file_path`. + Type FilePathDeltaAnnotationType `json:"type,required"` + EndIndex int64 `json:"end_index"` + FilePath FilePathDeltaAnnotationFilePath `json:"file_path"` + StartIndex int64 `json:"start_index"` + // The text in the message content that needs to be replaced. + Text string `json:"text"` + JSON filePathDeltaAnnotationJSON `json:"-"` +} + +// filePathDeltaAnnotationJSON contains the JSON metadata for the struct +// [FilePathDeltaAnnotation] +type filePathDeltaAnnotationJSON struct { + Index apijson.Field + Type apijson.Field + EndIndex apijson.Field + FilePath apijson.Field + StartIndex apijson.Field + Text apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FilePathDeltaAnnotation) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r filePathDeltaAnnotationJSON) RawJSON() string { + return r.raw +} + +func (r FilePathDeltaAnnotation) implementsAnnotationDelta() {} + +// Always `file_path`. +type FilePathDeltaAnnotationType string + +const ( + FilePathDeltaAnnotationTypeFilePath FilePathDeltaAnnotationType = "file_path" +) + +func (r FilePathDeltaAnnotationType) IsKnown() bool { + switch r { + case FilePathDeltaAnnotationTypeFilePath: + return true + } + return false +} + +type FilePathDeltaAnnotationFilePath struct { + // The ID of the file that was generated. 
+ FileID string `json:"file_id"` + JSON filePathDeltaAnnotationFilePathJSON `json:"-"` +} + +// filePathDeltaAnnotationFilePathJSON contains the JSON metadata for the struct +// [FilePathDeltaAnnotationFilePath] +type filePathDeltaAnnotationFilePathJSON struct { + FileID apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FilePathDeltaAnnotationFilePath) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r filePathDeltaAnnotationFilePathJSON) RawJSON() string { + return r.raw +} + +type ImageFile struct { + // The [File](https://platform.openai.com/docs/api-reference/files) ID of the image + // in the message content. Set `purpose="vision"` when uploading the File if you + // need to later display the file content. + FileID string `json:"file_id,required"` + // Specifies the detail level of the image if specified by the user. `low` uses + // fewer tokens, you can opt in to high resolution using `high`. + Detail ImageFileDetail `json:"detail"` + JSON imageFileJSON `json:"-"` +} + +// imageFileJSON contains the JSON metadata for the struct [ImageFile] +type imageFileJSON struct { + FileID apijson.Field + Detail apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ImageFile) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r imageFileJSON) RawJSON() string { + return r.raw +} + +// Specifies the detail level of the image if specified by the user. `low` uses +// fewer tokens, you can opt in to high resolution using `high`. 
+type ImageFileDetail string + +const ( + ImageFileDetailAuto ImageFileDetail = "auto" + ImageFileDetailLow ImageFileDetail = "low" + ImageFileDetailHigh ImageFileDetail = "high" +) + +func (r ImageFileDetail) IsKnown() bool { + switch r { + case ImageFileDetailAuto, ImageFileDetailLow, ImageFileDetailHigh: + return true + } + return false +} + +type ImageFileParam struct { + // The [File](https://platform.openai.com/docs/api-reference/files) ID of the image + // in the message content. Set `purpose="vision"` when uploading the File if you + // need to later display the file content. + FileID param.Field[string] `json:"file_id,required"` + // Specifies the detail level of the image if specified by the user. `low` uses + // fewer tokens, you can opt in to high resolution using `high`. + Detail param.Field[ImageFileDetail] `json:"detail"` +} + +func (r ImageFileParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// References an image [File](https://platform.openai.com/docs/api-reference/files) +// in the content of a message. +type ImageFileContentBlock struct { + ImageFile ImageFile `json:"image_file,required"` + // Always `image_file`. + Type ImageFileContentBlockType `json:"type,required"` + JSON imageFileContentBlockJSON `json:"-"` +} + +// imageFileContentBlockJSON contains the JSON metadata for the struct +// [ImageFileContentBlock] +type imageFileContentBlockJSON struct { + ImageFile apijson.Field + Type apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ImageFileContentBlock) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r imageFileContentBlockJSON) RawJSON() string { + return r.raw +} + +func (r ImageFileContentBlock) implementsMessageContent() {} + +// Always `image_file`. 
+type ImageFileContentBlockType string + +const ( + ImageFileContentBlockTypeImageFile ImageFileContentBlockType = "image_file" +) + +func (r ImageFileContentBlockType) IsKnown() bool { + switch r { + case ImageFileContentBlockTypeImageFile: + return true + } + return false +} + +// References an image [File](https://platform.openai.com/docs/api-reference/files) +// in the content of a message. +type ImageFileContentBlockParam struct { + ImageFile param.Field[ImageFileParam] `json:"image_file,required"` + // Always `image_file`. + Type param.Field[ImageFileContentBlockType] `json:"type,required"` +} + +func (r ImageFileContentBlockParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r ImageFileContentBlockParam) implementsMessageContentPartParamUnion() {} + +type ImageFileDelta struct { + // Specifies the detail level of the image if specified by the user. `low` uses + // fewer tokens, you can opt in to high resolution using `high`. + Detail ImageFileDeltaDetail `json:"detail"` + // The [File](https://platform.openai.com/docs/api-reference/files) ID of the image + // in the message content. Set `purpose="vision"` when uploading the File if you + // need to later display the file content. + FileID string `json:"file_id"` + JSON imageFileDeltaJSON `json:"-"` +} + +// imageFileDeltaJSON contains the JSON metadata for the struct [ImageFileDelta] +type imageFileDeltaJSON struct { + Detail apijson.Field + FileID apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ImageFileDelta) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r imageFileDeltaJSON) RawJSON() string { + return r.raw +} + +// Specifies the detail level of the image if specified by the user. `low` uses +// fewer tokens, you can opt in to high resolution using `high`. 
+type ImageFileDeltaDetail string + +const ( + ImageFileDeltaDetailAuto ImageFileDeltaDetail = "auto" + ImageFileDeltaDetailLow ImageFileDeltaDetail = "low" + ImageFileDeltaDetailHigh ImageFileDeltaDetail = "high" +) + +func (r ImageFileDeltaDetail) IsKnown() bool { + switch r { + case ImageFileDeltaDetailAuto, ImageFileDeltaDetailLow, ImageFileDeltaDetailHigh: + return true + } + return false +} + +// References an image [File](https://platform.openai.com/docs/api-reference/files) +// in the content of a message. +type ImageFileDeltaBlock struct { + // The index of the content part in the message. + Index int64 `json:"index,required"` + // Always `image_file`. + Type ImageFileDeltaBlockType `json:"type,required"` + ImageFile ImageFileDelta `json:"image_file"` + JSON imageFileDeltaBlockJSON `json:"-"` +} + +// imageFileDeltaBlockJSON contains the JSON metadata for the struct +// [ImageFileDeltaBlock] +type imageFileDeltaBlockJSON struct { + Index apijson.Field + Type apijson.Field + ImageFile apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ImageFileDeltaBlock) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r imageFileDeltaBlockJSON) RawJSON() string { + return r.raw +} + +func (r ImageFileDeltaBlock) implementsMessageContentDelta() {} + +// Always `image_file`. +type ImageFileDeltaBlockType string + +const ( + ImageFileDeltaBlockTypeImageFile ImageFileDeltaBlockType = "image_file" +) + +func (r ImageFileDeltaBlockType) IsKnown() bool { + switch r { + case ImageFileDeltaBlockTypeImageFile: + return true + } + return false +} + +type ImageURL struct { + // The external URL of the image, must be a supported image types: jpeg, jpg, png, + // gif, webp. + URL string `json:"url,required" format:"uri"` + // Specifies the detail level of the image. `low` uses fewer tokens, you can opt in + // to high resolution using `high`. 
Default value is `auto`.
+	Detail ImageURLDetail `json:"detail"`
+	JSON   imageURLJSON   `json:"-"`
+}
+
+// imageURLJSON contains the JSON metadata for the struct [ImageURL]
+type imageURLJSON struct {
+	URL         apijson.Field
+	Detail      apijson.Field
+	raw         string
+	ExtraFields map[string]apijson.Field
+}
+
+func (r *ImageURL) UnmarshalJSON(data []byte) (err error) {
+	return apijson.UnmarshalRoot(data, r)
+}
+
+func (r imageURLJSON) RawJSON() string {
+	return r.raw
+}
+
+// Specifies the detail level of the image. `low` uses fewer tokens, you can opt in
+// to high resolution using `high`. Default value is `auto`.
+type ImageURLDetail string
+
+const (
+	ImageURLDetailAuto ImageURLDetail = "auto"
+	ImageURLDetailLow  ImageURLDetail = "low"
+	ImageURLDetailHigh ImageURLDetail = "high"
+)
+
+func (r ImageURLDetail) IsKnown() bool {
+	switch r {
+	case ImageURLDetailAuto, ImageURLDetailLow, ImageURLDetailHigh:
+		return true
+	}
+	return false
+}
+
+type ImageURLParam struct {
+	// The external URL of the image, must be one of the supported image types:
+	// jpeg, jpg, png, gif, webp.
+	URL param.Field[string] `json:"url,required" format:"uri"`
+	// Specifies the detail level of the image. `low` uses fewer tokens, you can opt in
+	// to high resolution using `high`. Default value is `auto`.
+	Detail param.Field[ImageURLDetail] `json:"detail"`
+}
+
+func (r ImageURLParam) MarshalJSON() (data []byte, err error) {
+	return apijson.MarshalRoot(r)
+}
+
+// References an image URL in the content of a message.
+type ImageURLContentBlock struct {
+	ImageURL ImageURL `json:"image_url,required"`
+	// The type of the content part.
+ Type ImageURLContentBlockType `json:"type,required"` + JSON imageURLContentBlockJSON `json:"-"` +} + +// imageURLContentBlockJSON contains the JSON metadata for the struct +// [ImageURLContentBlock] +type imageURLContentBlockJSON struct { + ImageURL apijson.Field + Type apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ImageURLContentBlock) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r imageURLContentBlockJSON) RawJSON() string { + return r.raw +} + +func (r ImageURLContentBlock) implementsMessageContent() {} + +// The type of the content part. +type ImageURLContentBlockType string + +const ( + ImageURLContentBlockTypeImageURL ImageURLContentBlockType = "image_url" +) + +func (r ImageURLContentBlockType) IsKnown() bool { + switch r { + case ImageURLContentBlockTypeImageURL: + return true + } + return false +} + +// References an image URL in the content of a message. +type ImageURLContentBlockParam struct { + ImageURL param.Field[ImageURLParam] `json:"image_url,required"` + // The type of the content part. + Type param.Field[ImageURLContentBlockType] `json:"type,required"` +} + +func (r ImageURLContentBlockParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r ImageURLContentBlockParam) implementsMessageContentPartParamUnion() {} + +type ImageURLDelta struct { + // Specifies the detail level of the image. `low` uses fewer tokens, you can opt in + // to high resolution using `high`. + Detail ImageURLDeltaDetail `json:"detail"` + // The URL of the image, must be a supported image types: jpeg, jpg, png, gif, + // webp. 
+ URL string `json:"url"` + JSON imageURLDeltaJSON `json:"-"` +} + +// imageURLDeltaJSON contains the JSON metadata for the struct [ImageURLDelta] +type imageURLDeltaJSON struct { + Detail apijson.Field + URL apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ImageURLDelta) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r imageURLDeltaJSON) RawJSON() string { + return r.raw +} + +// Specifies the detail level of the image. `low` uses fewer tokens, you can opt in +// to high resolution using `high`. +type ImageURLDeltaDetail string + +const ( + ImageURLDeltaDetailAuto ImageURLDeltaDetail = "auto" + ImageURLDeltaDetailLow ImageURLDeltaDetail = "low" + ImageURLDeltaDetailHigh ImageURLDeltaDetail = "high" +) + +func (r ImageURLDeltaDetail) IsKnown() bool { + switch r { + case ImageURLDeltaDetailAuto, ImageURLDeltaDetailLow, ImageURLDeltaDetailHigh: + return true + } + return false +} + +// References an image URL in the content of a message. +type ImageURLDeltaBlock struct { + // The index of the content part in the message. + Index int64 `json:"index,required"` + // Always `image_url`. + Type ImageURLDeltaBlockType `json:"type,required"` + ImageURL ImageURLDelta `json:"image_url"` + JSON imageURLDeltaBlockJSON `json:"-"` +} + +// imageURLDeltaBlockJSON contains the JSON metadata for the struct +// [ImageURLDeltaBlock] +type imageURLDeltaBlockJSON struct { + Index apijson.Field + Type apijson.Field + ImageURL apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ImageURLDeltaBlock) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r imageURLDeltaBlockJSON) RawJSON() string { + return r.raw +} + +func (r ImageURLDeltaBlock) implementsMessageContentDelta() {} + +// Always `image_url`. 
+type ImageURLDeltaBlockType string
+
+const (
+	ImageURLDeltaBlockTypeImageURL ImageURLDeltaBlockType = "image_url"
+)
+
+func (r ImageURLDeltaBlockType) IsKnown() bool {
+	switch r {
+	case ImageURLDeltaBlockTypeImageURL:
+		return true
+	}
+	return false
+}
+
+// Represents a message within a
+// [thread](https://platform.openai.com/docs/api-reference/threads).
+type Message struct {
+	// The identifier, which can be referenced in API endpoints.
+	ID string `json:"id,required"`
+	// If applicable, the ID of the
+	// [assistant](https://platform.openai.com/docs/api-reference/assistants) that
+	// authored this message.
+	AssistantID string `json:"assistant_id,required,nullable"`
+	// A list of files attached to the message, and the tools they were added to.
+	Attachments []MessageAttachment `json:"attachments,required,nullable"`
+	// The Unix timestamp (in seconds) for when the message was completed.
+	CompletedAt int64 `json:"completed_at,required,nullable"`
+	// The content of the message as an array of text and/or images.
+	Content []MessageContent `json:"content,required"`
+	// The Unix timestamp (in seconds) for when the message was created.
+	CreatedAt int64 `json:"created_at,required"`
+	// The Unix timestamp (in seconds) for when the message was marked as incomplete.
+	IncompleteAt int64 `json:"incomplete_at,required,nullable"`
+	// On an incomplete message, details about why the message is incomplete.
+	IncompleteDetails MessageIncompleteDetails `json:"incomplete_details,required,nullable"`
+	// Set of 16 key-value pairs that can be attached to an object. This can be useful
+	// for storing additional information about the object in a structured format. Keys
+	// can be a maximum of 64 characters long and values can be a maximum of 512
+	// characters long.
+	Metadata interface{} `json:"metadata,required,nullable"`
+	// The object type, which is always `thread.message`.
+	Object MessageObject `json:"object,required"`
+	// The entity that produced the message. One of `user` or `assistant`.
+	Role MessageRole `json:"role,required"`
+	// The ID of the [run](https://platform.openai.com/docs/api-reference/runs)
+	// associated with the creation of this message. Value is `null` when messages are
+	// created manually using the create message or create thread endpoints.
+	RunID string `json:"run_id,required,nullable"`
+	// The status of the message, which can be either `in_progress`, `incomplete`, or
+	// `completed`.
+	Status MessageStatus `json:"status,required"`
+	// The [thread](https://platform.openai.com/docs/api-reference/threads) ID that
+	// this message belongs to.
+	ThreadID string `json:"thread_id,required"`
+	JSON     messageJSON `json:"-"`
+}
+
+// messageJSON contains the JSON metadata for the struct [Message]
+type messageJSON struct {
+	ID                apijson.Field
+	AssistantID       apijson.Field
+	Attachments       apijson.Field
+	CompletedAt       apijson.Field
+	Content           apijson.Field
+	CreatedAt         apijson.Field
+	IncompleteAt      apijson.Field
+	IncompleteDetails apijson.Field
+	Metadata          apijson.Field
+	Object            apijson.Field
+	Role              apijson.Field
+	RunID             apijson.Field
+	Status            apijson.Field
+	ThreadID          apijson.Field
+	raw               string
+	ExtraFields       map[string]apijson.Field
+}
+
+func (r *Message) UnmarshalJSON(data []byte) (err error) {
+	return apijson.UnmarshalRoot(data, r)
+}
+
+func (r messageJSON) RawJSON() string {
+	return r.raw
+}
+
+type MessageAttachment struct {
+	// The ID of the file to attach to the message.
+	FileID string `json:"file_id"`
+	// The tools to add this file to.
+ Tools []MessageAttachmentsTool `json:"tools"` + JSON messageAttachmentJSON `json:"-"` +} + +// messageAttachmentJSON contains the JSON metadata for the struct +// [MessageAttachment] +type messageAttachmentJSON struct { + FileID apijson.Field + Tools apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *MessageAttachment) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r messageAttachmentJSON) RawJSON() string { + return r.raw +} + +type MessageAttachmentsTool struct { + // The type of tool being defined: `code_interpreter` + Type MessageAttachmentsToolsType `json:"type,required"` + JSON messageAttachmentsToolJSON `json:"-"` + union MessageAttachmentsToolsUnion +} + +// messageAttachmentsToolJSON contains the JSON metadata for the struct +// [MessageAttachmentsTool] +type messageAttachmentsToolJSON struct { + Type apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r messageAttachmentsToolJSON) RawJSON() string { + return r.raw +} + +func (r *MessageAttachmentsTool) UnmarshalJSON(data []byte) (err error) { + *r = MessageAttachmentsTool{} + err = apijson.UnmarshalRoot(data, &r.union) + if err != nil { + return err + } + return apijson.Port(r.union, &r) +} + +// AsUnion returns a [MessageAttachmentsToolsUnion] interface which you can cast to +// the specific types for more type safety. +// +// Possible runtime types of the union are [CodeInterpreterTool], +// [MessageAttachmentsToolsAssistantToolsFileSearchTypeOnly]. +func (r MessageAttachmentsTool) AsUnion() MessageAttachmentsToolsUnion { + return r.union +} + +// Union satisfied by [CodeInterpreterTool] or +// [MessageAttachmentsToolsAssistantToolsFileSearchTypeOnly]. 
+type MessageAttachmentsToolsUnion interface { + implementsMessageAttachmentsTool() +} + +func init() { + apijson.RegisterUnion( + reflect.TypeOf((*MessageAttachmentsToolsUnion)(nil)).Elem(), + "", + apijson.UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(CodeInterpreterTool{}), + }, + apijson.UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(MessageAttachmentsToolsAssistantToolsFileSearchTypeOnly{}), + }, + ) +} + +type MessageAttachmentsToolsAssistantToolsFileSearchTypeOnly struct { + // The type of tool being defined: `file_search` + Type MessageAttachmentsToolsAssistantToolsFileSearchTypeOnlyType `json:"type,required"` + JSON messageAttachmentsToolsAssistantToolsFileSearchTypeOnlyJSON `json:"-"` +} + +// messageAttachmentsToolsAssistantToolsFileSearchTypeOnlyJSON contains the JSON +// metadata for the struct +// [MessageAttachmentsToolsAssistantToolsFileSearchTypeOnly] +type messageAttachmentsToolsAssistantToolsFileSearchTypeOnlyJSON struct { + Type apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *MessageAttachmentsToolsAssistantToolsFileSearchTypeOnly) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r messageAttachmentsToolsAssistantToolsFileSearchTypeOnlyJSON) RawJSON() string { + return r.raw +} + +func (r MessageAttachmentsToolsAssistantToolsFileSearchTypeOnly) implementsMessageAttachmentsTool() {} + +// The type of tool being defined: `file_search` +type MessageAttachmentsToolsAssistantToolsFileSearchTypeOnlyType string + +const ( + MessageAttachmentsToolsAssistantToolsFileSearchTypeOnlyTypeFileSearch MessageAttachmentsToolsAssistantToolsFileSearchTypeOnlyType = "file_search" +) + +func (r MessageAttachmentsToolsAssistantToolsFileSearchTypeOnlyType) IsKnown() bool { + switch r { + case MessageAttachmentsToolsAssistantToolsFileSearchTypeOnlyTypeFileSearch: + return true + } + return false +} + +// The type of tool being defined: `code_interpreter` +type 
MessageAttachmentsToolsType string + +const ( + MessageAttachmentsToolsTypeCodeInterpreter MessageAttachmentsToolsType = "code_interpreter" + MessageAttachmentsToolsTypeFileSearch MessageAttachmentsToolsType = "file_search" +) + +func (r MessageAttachmentsToolsType) IsKnown() bool { + switch r { + case MessageAttachmentsToolsTypeCodeInterpreter, MessageAttachmentsToolsTypeFileSearch: + return true + } + return false +} + +// On an incomplete message, details about why the message is incomplete. +type MessageIncompleteDetails struct { + // The reason the message is incomplete. + Reason MessageIncompleteDetailsReason `json:"reason,required"` + JSON messageIncompleteDetailsJSON `json:"-"` +} + +// messageIncompleteDetailsJSON contains the JSON metadata for the struct +// [MessageIncompleteDetails] +type messageIncompleteDetailsJSON struct { + Reason apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *MessageIncompleteDetails) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r messageIncompleteDetailsJSON) RawJSON() string { + return r.raw +} + +// The reason the message is incomplete. 
+type MessageIncompleteDetailsReason string + +const ( + MessageIncompleteDetailsReasonContentFilter MessageIncompleteDetailsReason = "content_filter" + MessageIncompleteDetailsReasonMaxTokens MessageIncompleteDetailsReason = "max_tokens" + MessageIncompleteDetailsReasonRunCancelled MessageIncompleteDetailsReason = "run_cancelled" + MessageIncompleteDetailsReasonRunExpired MessageIncompleteDetailsReason = "run_expired" + MessageIncompleteDetailsReasonRunFailed MessageIncompleteDetailsReason = "run_failed" +) + +func (r MessageIncompleteDetailsReason) IsKnown() bool { + switch r { + case MessageIncompleteDetailsReasonContentFilter, MessageIncompleteDetailsReasonMaxTokens, MessageIncompleteDetailsReasonRunCancelled, MessageIncompleteDetailsReasonRunExpired, MessageIncompleteDetailsReasonRunFailed: + return true + } + return false +} + +// The object type, which is always `thread.message`. +type MessageObject string + +const ( + MessageObjectThreadMessage MessageObject = "thread.message" +) + +func (r MessageObject) IsKnown() bool { + switch r { + case MessageObjectThreadMessage: + return true + } + return false +} + +// The entity that produced the message. One of `user` or `assistant`. +type MessageRole string + +const ( + MessageRoleUser MessageRole = "user" + MessageRoleAssistant MessageRole = "assistant" +) + +func (r MessageRole) IsKnown() bool { + switch r { + case MessageRoleUser, MessageRoleAssistant: + return true + } + return false +} + +// The status of the message, which can be either `in_progress`, `incomplete`, or +// `completed`. 
+type MessageStatus string + +const ( + MessageStatusInProgress MessageStatus = "in_progress" + MessageStatusIncomplete MessageStatus = "incomplete" + MessageStatusCompleted MessageStatus = "completed" +) + +func (r MessageStatus) IsKnown() bool { + switch r { + case MessageStatusInProgress, MessageStatusIncomplete, MessageStatusCompleted: + return true + } + return false +} + +// References an image [File](https://platform.openai.com/docs/api-reference/files) +// in the content of a message. +type MessageContent struct { + // Always `image_file`. + Type MessageContentType `json:"type,required"` + ImageFile ImageFile `json:"image_file"` + ImageURL ImageURL `json:"image_url"` + Text Text `json:"text"` + JSON messageContentJSON `json:"-"` + union MessageContentUnion +} + +// messageContentJSON contains the JSON metadata for the struct [MessageContent] +type messageContentJSON struct { + Type apijson.Field + ImageFile apijson.Field + ImageURL apijson.Field + Text apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r messageContentJSON) RawJSON() string { + return r.raw +} + +func (r *MessageContent) UnmarshalJSON(data []byte) (err error) { + *r = MessageContent{} + err = apijson.UnmarshalRoot(data, &r.union) + if err != nil { + return err + } + return apijson.Port(r.union, &r) +} + +// AsUnion returns a [MessageContentUnion] interface which you can cast to the +// specific types for more type safety. +// +// Possible runtime types of the union are [ImageFileContentBlock], +// [ImageURLContentBlock], [TextContentBlock]. +func (r MessageContent) AsUnion() MessageContentUnion { + return r.union +} + +// References an image [File](https://platform.openai.com/docs/api-reference/files) +// in the content of a message. +// +// Union satisfied by [ImageFileContentBlock], [ImageURLContentBlock] or +// [TextContentBlock]. 
+type MessageContentUnion interface {
+	implementsMessageContent()
+}
+
+func init() {
+	apijson.RegisterUnion(
+		reflect.TypeOf((*MessageContentUnion)(nil)).Elem(),
+		"type",
+		apijson.UnionVariant{
+			TypeFilter:         gjson.JSON,
+			Type:               reflect.TypeOf(ImageFileContentBlock{}),
+			DiscriminatorValue: "image_file",
+		},
+		apijson.UnionVariant{
+			TypeFilter:         gjson.JSON,
+			Type:               reflect.TypeOf(ImageURLContentBlock{}),
+			DiscriminatorValue: "image_url",
+		},
+		apijson.UnionVariant{
+			TypeFilter:         gjson.JSON,
+			Type:               reflect.TypeOf(TextContentBlock{}),
+			DiscriminatorValue: "text",
+		},
+	)
+}
+
+// The type of the content part. One of `image_file`, `image_url`, or `text`.
+type MessageContentType string
+
+const (
+	MessageContentTypeImageFile MessageContentType = "image_file"
+	MessageContentTypeImageURL  MessageContentType = "image_url"
+	MessageContentTypeText      MessageContentType = "text"
+)
+
+func (r MessageContentType) IsKnown() bool {
+	switch r {
+	case MessageContentTypeImageFile, MessageContentTypeImageURL, MessageContentTypeText:
+		return true
+	}
+	return false
+}
+
+// References an image [File](https://platform.openai.com/docs/api-reference/files)
+// in the content of a message.
+type MessageContentDelta struct {
+	// The index of the content part in the message.
+	Index int64 `json:"index,required"`
+	// The type of the content part.
+ Type MessageContentDeltaType `json:"type,required"` + ImageFile ImageFileDelta `json:"image_file"` + Text TextDelta `json:"text"` + ImageURL ImageURLDelta `json:"image_url"` + JSON messageContentDeltaJSON `json:"-"` + union MessageContentDeltaUnion +} + +// messageContentDeltaJSON contains the JSON metadata for the struct +// [MessageContentDelta] +type messageContentDeltaJSON struct { + Index apijson.Field + Type apijson.Field + ImageFile apijson.Field + Text apijson.Field + ImageURL apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r messageContentDeltaJSON) RawJSON() string { + return r.raw +} + +func (r *MessageContentDelta) UnmarshalJSON(data []byte) (err error) { + *r = MessageContentDelta{} + err = apijson.UnmarshalRoot(data, &r.union) + if err != nil { + return err + } + return apijson.Port(r.union, &r) +} + +// AsUnion returns a [MessageContentDeltaUnion] interface which you can cast to the +// specific types for more type safety. +// +// Possible runtime types of the union are [ImageFileDeltaBlock], [TextDeltaBlock], +// [ImageURLDeltaBlock]. +func (r MessageContentDelta) AsUnion() MessageContentDeltaUnion { + return r.union +} + +// References an image [File](https://platform.openai.com/docs/api-reference/files) +// in the content of a message. +// +// Union satisfied by [ImageFileDeltaBlock], [TextDeltaBlock] or +// [ImageURLDeltaBlock]. 
+type MessageContentDeltaUnion interface {
+	implementsMessageContentDelta()
+}
+
+func init() {
+	apijson.RegisterUnion(
+		reflect.TypeOf((*MessageContentDeltaUnion)(nil)).Elem(),
+		"type",
+		apijson.UnionVariant{
+			TypeFilter:         gjson.JSON,
+			Type:               reflect.TypeOf(ImageFileDeltaBlock{}),
+			DiscriminatorValue: "image_file",
+		},
+		apijson.UnionVariant{
+			TypeFilter:         gjson.JSON,
+			Type:               reflect.TypeOf(TextDeltaBlock{}),
+			DiscriminatorValue: "text",
+		},
+		apijson.UnionVariant{
+			TypeFilter:         gjson.JSON,
+			Type:               reflect.TypeOf(ImageURLDeltaBlock{}),
+			DiscriminatorValue: "image_url",
+		},
+	)
+}
+
+// The type of the content part. One of `image_file`, `text`, or `image_url`.
+type MessageContentDeltaType string
+
+const (
+	MessageContentDeltaTypeImageFile MessageContentDeltaType = "image_file"
+	MessageContentDeltaTypeText      MessageContentDeltaType = "text"
+	MessageContentDeltaTypeImageURL  MessageContentDeltaType = "image_url"
+)
+
+func (r MessageContentDeltaType) IsKnown() bool {
+	switch r {
+	case MessageContentDeltaTypeImageFile, MessageContentDeltaTypeText, MessageContentDeltaTypeImageURL:
+		return true
+	}
+	return false
+}
+
+// References an image [File](https://platform.openai.com/docs/api-reference/files)
+// in the content of a message.
+type MessageContentPartParam struct {
+	// The type of the content part.
+	Type      param.Field[MessageContentPartParamType] `json:"type,required"`
+	ImageFile param.Field[ImageFileParam]              `json:"image_file"`
+	ImageURL  param.Field[ImageURLParam]               `json:"image_url"`
+	// Text content to be sent to the model
+	Text param.Field[string] `json:"text"`
+}
+
+func (r MessageContentPartParam) MarshalJSON() (data []byte, err error) {
+	return apijson.MarshalRoot(r)
+}
+
+func (r MessageContentPartParam) implementsMessageContentPartParamUnion() {}
+
+// References an image [File](https://platform.openai.com/docs/api-reference/files)
+// in the content of a message.
+//
+// Satisfied by [ImageFileContentBlockParam], [ImageURLContentBlockParam],
+// [TextContentBlockParam], [MessageContentPartParam].
+type MessageContentPartParamUnion interface {
+	implementsMessageContentPartParamUnion()
+}
+
+// The type of the content part. One of `image_file`, `image_url`, or `text`.
+type MessageContentPartParamType string
+
+const (
+	MessageContentPartParamTypeImageFile MessageContentPartParamType = "image_file"
+	MessageContentPartParamTypeImageURL  MessageContentPartParamType = "image_url"
+	MessageContentPartParamTypeText      MessageContentPartParamType = "text"
+)
+
+func (r MessageContentPartParamType) IsKnown() bool {
+	switch r {
+	case MessageContentPartParamTypeImageFile, MessageContentPartParamTypeImageURL, MessageContentPartParamTypeText:
+		return true
+	}
+	return false
+}
+
+type MessageDeleted struct {
+	ID      string               `json:"id,required"`
+	Deleted bool                 `json:"deleted,required"`
+	Object  MessageDeletedObject `json:"object,required"`
+	JSON    messageDeletedJSON   `json:"-"`
+}
+
+// messageDeletedJSON contains the JSON metadata for the struct [MessageDeleted]
+type messageDeletedJSON struct {
+	ID          apijson.Field
+	Deleted     apijson.Field
+	Object      apijson.Field
+	raw         string
+	ExtraFields map[string]apijson.Field
+}
+
+func (r *MessageDeleted) UnmarshalJSON(data []byte) (err error) {
+	return apijson.UnmarshalRoot(data, r)
+}
+
+func (r messageDeletedJSON) RawJSON() string {
+	return r.raw
+}
+
+type MessageDeletedObject string
+
+const (
+	MessageDeletedObjectThreadMessageDeleted MessageDeletedObject = "thread.message.deleted"
+)
+
+func (r MessageDeletedObject) IsKnown() bool {
+	switch r {
+	case MessageDeletedObjectThreadMessageDeleted:
+		return true
+	}
+	return false
+}
+
+// The delta containing the fields that have changed on the Message.
+type MessageDelta struct {
+	// The content of the message as an array of text and/or images.
+	Content []MessageContentDelta `json:"content"`
+	// The entity that produced the message. One of `user` or `assistant`.
+ Role MessageDeltaRole `json:"role"` + JSON messageDeltaJSON `json:"-"` +} + +// messageDeltaJSON contains the JSON metadata for the struct [MessageDelta] +type messageDeltaJSON struct { + Content apijson.Field + Role apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *MessageDelta) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r messageDeltaJSON) RawJSON() string { + return r.raw +} + +// The entity that produced the message. One of `user` or `assistant`. +type MessageDeltaRole string + +const ( + MessageDeltaRoleUser MessageDeltaRole = "user" + MessageDeltaRoleAssistant MessageDeltaRole = "assistant" +) + +func (r MessageDeltaRole) IsKnown() bool { + switch r { + case MessageDeltaRoleUser, MessageDeltaRoleAssistant: + return true + } + return false +} + +// Represents a message delta i.e. any changed fields on a message during +// streaming. +type MessageDeltaEvent struct { + // The identifier of the message, which can be referenced in API endpoints. + ID string `json:"id,required"` + // The delta containing the fields that have changed on the Message. + Delta MessageDelta `json:"delta,required"` + // The object type, which is always `thread.message.delta`. + Object MessageDeltaEventObject `json:"object,required"` + JSON messageDeltaEventJSON `json:"-"` +} + +// messageDeltaEventJSON contains the JSON metadata for the struct +// [MessageDeltaEvent] +type messageDeltaEventJSON struct { + ID apijson.Field + Delta apijson.Field + Object apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *MessageDeltaEvent) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r messageDeltaEventJSON) RawJSON() string { + return r.raw +} + +// The object type, which is always `thread.message.delta`. 
+type MessageDeltaEventObject string + +const ( + MessageDeltaEventObjectThreadMessageDelta MessageDeltaEventObject = "thread.message.delta" +) + +func (r MessageDeltaEventObject) IsKnown() bool { + switch r { + case MessageDeltaEventObjectThreadMessageDelta: + return true + } + return false +} + +type Text struct { + Annotations []Annotation `json:"annotations,required"` + // The data that makes up the text. + Value string `json:"value,required"` + JSON textJSON `json:"-"` +} + +// textJSON contains the JSON metadata for the struct [Text] +type textJSON struct { + Annotations apijson.Field + Value apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *Text) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r textJSON) RawJSON() string { + return r.raw +} + +// The text content that is part of a message. +type TextContentBlock struct { + Text Text `json:"text,required"` + // Always `text`. + Type TextContentBlockType `json:"type,required"` + JSON textContentBlockJSON `json:"-"` +} + +// textContentBlockJSON contains the JSON metadata for the struct +// [TextContentBlock] +type textContentBlockJSON struct { + Text apijson.Field + Type apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *TextContentBlock) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r textContentBlockJSON) RawJSON() string { + return r.raw +} + +func (r TextContentBlock) implementsMessageContent() {} + +// Always `text`. +type TextContentBlockType string + +const ( + TextContentBlockTypeText TextContentBlockType = "text" +) + +func (r TextContentBlockType) IsKnown() bool { + switch r { + case TextContentBlockTypeText: + return true + } + return false +} + +// The text content that is part of a message. +type TextContentBlockParam struct { + // Text content to be sent to the model + Text param.Field[string] `json:"text,required"` + // Always `text`. 
+ Type param.Field[TextContentBlockParamType] `json:"type,required"` +} + +func (r TextContentBlockParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r TextContentBlockParam) implementsMessageContentPartParamUnion() {} + +// Always `text`. +type TextContentBlockParamType string + +const ( + TextContentBlockParamTypeText TextContentBlockParamType = "text" +) + +func (r TextContentBlockParamType) IsKnown() bool { + switch r { + case TextContentBlockParamTypeText: + return true + } + return false +} + +type TextDelta struct { + Annotations []AnnotationDelta `json:"annotations"` + // The data that makes up the text. + Value string `json:"value"` + JSON textDeltaJSON `json:"-"` +} + +// textDeltaJSON contains the JSON metadata for the struct [TextDelta] +type textDeltaJSON struct { + Annotations apijson.Field + Value apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *TextDelta) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r textDeltaJSON) RawJSON() string { + return r.raw +} + +// The text content that is part of a message. +type TextDeltaBlock struct { + // The index of the content part in the message. + Index int64 `json:"index,required"` + // Always `text`. + Type TextDeltaBlockType `json:"type,required"` + Text TextDelta `json:"text"` + JSON textDeltaBlockJSON `json:"-"` +} + +// textDeltaBlockJSON contains the JSON metadata for the struct [TextDeltaBlock] +type textDeltaBlockJSON struct { + Index apijson.Field + Type apijson.Field + Text apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *TextDeltaBlock) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r textDeltaBlockJSON) RawJSON() string { + return r.raw +} + +func (r TextDeltaBlock) implementsMessageContentDelta() {} + +// Always `text`. 
+type TextDeltaBlockType string + +const ( + TextDeltaBlockTypeText TextDeltaBlockType = "text" +) + +func (r TextDeltaBlockType) IsKnown() bool { + switch r { + case TextDeltaBlockTypeText: + return true + } + return false +} + +type BetaThreadMessageNewParams struct { + // The text contents of the message. + Content param.Field[BetaThreadMessageNewParamsContentUnion] `json:"content,required"` + // The role of the entity that is creating the message. Allowed values include: + // + // - `user`: Indicates the message is sent by an actual user and should be used in + // most cases to represent user-generated messages. + // - `assistant`: Indicates the message is generated by the assistant. Use this + // value to insert messages from the assistant into the conversation. + Role param.Field[BetaThreadMessageNewParamsRole] `json:"role,required"` + // A list of files attached to the message, and the tools they should be added to. + Attachments param.Field[[]BetaThreadMessageNewParamsAttachment] `json:"attachments"` + // Set of 16 key-value pairs that can be attached to an object. This can be useful + // for storing additional information about the object in a structured format. Keys + // can be a maximum of 64 characters long and values can be a maxium of 512 + // characters long. + Metadata param.Field[interface{}] `json:"metadata"` +} + +func (r BetaThreadMessageNewParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// The text contents of the message. +// +// Satisfied by [shared.UnionString], +// [BetaThreadMessageNewParamsContentArrayOfContentParts]. +type BetaThreadMessageNewParamsContentUnion interface { + ImplementsBetaThreadMessageNewParamsContentUnion() +} + +type BetaThreadMessageNewParamsContentArrayOfContentParts []MessageContentPartParamUnion + +func (r BetaThreadMessageNewParamsContentArrayOfContentParts) ImplementsBetaThreadMessageNewParamsContentUnion() { +} + +// The role of the entity that is creating the message. 
Allowed values include: +// +// - `user`: Indicates the message is sent by an actual user and should be used in +// most cases to represent user-generated messages. +// - `assistant`: Indicates the message is generated by the assistant. Use this +// value to insert messages from the assistant into the conversation. +type BetaThreadMessageNewParamsRole string + +const ( + BetaThreadMessageNewParamsRoleUser BetaThreadMessageNewParamsRole = "user" + BetaThreadMessageNewParamsRoleAssistant BetaThreadMessageNewParamsRole = "assistant" +) + +func (r BetaThreadMessageNewParamsRole) IsKnown() bool { + switch r { + case BetaThreadMessageNewParamsRoleUser, BetaThreadMessageNewParamsRoleAssistant: + return true + } + return false +} + +type BetaThreadMessageNewParamsAttachment struct { + // The ID of the file to attach to the message. + FileID param.Field[string] `json:"file_id"` + // The tools to add this file to. + Tools param.Field[[]BetaThreadMessageNewParamsAttachmentsToolUnion] `json:"tools"` +} + +func (r BetaThreadMessageNewParamsAttachment) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaThreadMessageNewParamsAttachmentsTool struct { + // The type of tool being defined: `code_interpreter` + Type param.Field[BetaThreadMessageNewParamsAttachmentsToolsType] `json:"type,required"` +} + +func (r BetaThreadMessageNewParamsAttachmentsTool) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r BetaThreadMessageNewParamsAttachmentsTool) implementsBetaThreadMessageNewParamsAttachmentsToolUnion() { +} + +// Satisfied by [CodeInterpreterToolParam], +// [BetaThreadMessageNewParamsAttachmentsToolsFileSearch], +// [BetaThreadMessageNewParamsAttachmentsTool]. 
+type BetaThreadMessageNewParamsAttachmentsToolUnion interface { + implementsBetaThreadMessageNewParamsAttachmentsToolUnion() +} + +type BetaThreadMessageNewParamsAttachmentsToolsFileSearch struct { + // The type of tool being defined: `file_search` + Type param.Field[BetaThreadMessageNewParamsAttachmentsToolsFileSearchType] `json:"type,required"` +} + +func (r BetaThreadMessageNewParamsAttachmentsToolsFileSearch) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r BetaThreadMessageNewParamsAttachmentsToolsFileSearch) implementsBetaThreadMessageNewParamsAttachmentsToolUnion() { +} + +// The type of tool being defined: `file_search` +type BetaThreadMessageNewParamsAttachmentsToolsFileSearchType string + +const ( + BetaThreadMessageNewParamsAttachmentsToolsFileSearchTypeFileSearch BetaThreadMessageNewParamsAttachmentsToolsFileSearchType = "file_search" +) + +func (r BetaThreadMessageNewParamsAttachmentsToolsFileSearchType) IsKnown() bool { + switch r { + case BetaThreadMessageNewParamsAttachmentsToolsFileSearchTypeFileSearch: + return true + } + return false +} + +// The type of tool being defined: `code_interpreter` +type BetaThreadMessageNewParamsAttachmentsToolsType string + +const ( + BetaThreadMessageNewParamsAttachmentsToolsTypeCodeInterpreter BetaThreadMessageNewParamsAttachmentsToolsType = "code_interpreter" + BetaThreadMessageNewParamsAttachmentsToolsTypeFileSearch BetaThreadMessageNewParamsAttachmentsToolsType = "file_search" +) + +func (r BetaThreadMessageNewParamsAttachmentsToolsType) IsKnown() bool { + switch r { + case BetaThreadMessageNewParamsAttachmentsToolsTypeCodeInterpreter, BetaThreadMessageNewParamsAttachmentsToolsTypeFileSearch: + return true + } + return false +} + +type BetaThreadMessageUpdateParams struct { + // Set of 16 key-value pairs that can be attached to an object. This can be useful + // for storing additional information about the object in a structured format. 
Keys + // can be a maximum of 64 characters long and values can be a maxium of 512 + // characters long. + Metadata param.Field[interface{}] `json:"metadata"` +} + +func (r BetaThreadMessageUpdateParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaThreadMessageListParams struct { + // A cursor for use in pagination. `after` is an object ID that defines your place + // in the list. For instance, if you make a list request and receive 100 objects, + // ending with obj_foo, your subsequent call can include after=obj_foo in order to + // fetch the next page of the list. + After param.Field[string] `query:"after"` + // A cursor for use in pagination. `before` is an object ID that defines your place + // in the list. For instance, if you make a list request and receive 100 objects, + // ending with obj_foo, your subsequent call can include before=obj_foo in order to + // fetch the previous page of the list. + Before param.Field[string] `query:"before"` + // A limit on the number of objects to be returned. Limit can range between 1 and + // 100, and the default is 20. + Limit param.Field[int64] `query:"limit"` + // Sort order by the `created_at` timestamp of the objects. `asc` for ascending + // order and `desc` for descending order. + Order param.Field[BetaThreadMessageListParamsOrder] `query:"order"` + // Filter messages by the run ID that generated them. + RunID param.Field[string] `query:"run_id"` +} + +// URLQuery serializes [BetaThreadMessageListParams]'s query parameters as +// `url.Values`. +func (r BetaThreadMessageListParams) URLQuery() (v url.Values) { + return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ + ArrayFormat: apiquery.ArrayQueryFormatComma, + NestedFormat: apiquery.NestedQueryFormatBrackets, + }) +} + +// Sort order by the `created_at` timestamp of the objects. `asc` for ascending +// order and `desc` for descending order. 
+type BetaThreadMessageListParamsOrder string + +const ( + BetaThreadMessageListParamsOrderAsc BetaThreadMessageListParamsOrder = "asc" + BetaThreadMessageListParamsOrderDesc BetaThreadMessageListParamsOrder = "desc" +) + +func (r BetaThreadMessageListParamsOrder) IsKnown() bool { + switch r { + case BetaThreadMessageListParamsOrderAsc, BetaThreadMessageListParamsOrderDesc: + return true + } + return false +} diff --git a/betathreadmessage_test.go b/betathreadmessage_test.go new file mode 100644 index 0000000..b3ec662 --- /dev/null +++ b/betathreadmessage_test.go @@ -0,0 +1,186 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" + "github.com/openai/openai-go/shared" +) + +func TestBetaThreadMessageNewWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Threads.Messages.New( + context.TODO(), + "thread_id", + openai.BetaThreadMessageNewParams{ + Content: openai.F[openai.BetaThreadMessageNewParamsContentUnion](shared.UnionString("string")), + Role: openai.F(openai.BetaThreadMessageNewParamsRoleUser), + Attachments: openai.F([]openai.BetaThreadMessageNewParamsAttachment{{ + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadMessageNewParamsAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: 
openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }, { + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadMessageNewParamsAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }, { + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadMessageNewParamsAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }}), + Metadata: openai.F[any](map[string]interface{}{}), + }, + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaThreadMessageGet(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Threads.Messages.Get( + context.TODO(), + "thread_id", + "message_id", + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaThreadMessageUpdateWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + 
return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Threads.Messages.Update( + context.TODO(), + "thread_id", + "message_id", + openai.BetaThreadMessageUpdateParams{ + Metadata: openai.F[any](map[string]interface{}{}), + }, + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaThreadMessageListWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Threads.Messages.List( + context.TODO(), + "thread_id", + openai.BetaThreadMessageListParams{ + After: openai.F("after"), + Before: openai.F("before"), + Limit: openai.F(int64(0)), + Order: openai.F(openai.BetaThreadMessageListParamsOrderAsc), + RunID: openai.F("run_id"), + }, + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaThreadMessageDelete(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Threads.Messages.Delete( + context.TODO(), + "thread_id", + "message_id", + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/betathreadrun.go b/betathreadrun.go new file mode 100644 
index 0000000..507dd30 --- /dev/null +++ b/betathreadrun.go @@ -0,0 +1,1039 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + + "github.com/openai/openai-go/internal/apijson" + "github.com/openai/openai-go/internal/apiquery" + "github.com/openai/openai-go/internal/pagination" + "github.com/openai/openai-go/internal/param" + "github.com/openai/openai-go/internal/requestconfig" + "github.com/openai/openai-go/option" + "github.com/openai/openai-go/packages/ssestream" +) + +// BetaThreadRunService contains methods and other services that help with +// interacting with the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewBetaThreadRunService] method instead. +type BetaThreadRunService struct { + Options []option.RequestOption + Steps *BetaThreadRunStepService +} + +// NewBetaThreadRunService generates a new service that applies the given options +// to each request. These options are applied after the parent client's options (if +// there is one), and before any request-specific options. +func NewBetaThreadRunService(opts ...option.RequestOption) (r *BetaThreadRunService) { + r = &BetaThreadRunService{} + r.Options = opts + r.Steps = NewBetaThreadRunStepService(opts...) + return +} + +// Create a run. +func (r *BetaThreadRunService) New(ctx context.Context, threadID string, body BetaThreadRunNewParams, opts ...option.RequestOption) (res *Run, err error) { + opts = append(r.Options[:], opts...) + if threadID == "" { + err = errors.New("missing required thread_id parameter") + return + } + path := fmt.Sprintf("threads/%s/runs", threadID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Create a run. 
+func (r *BetaThreadRunService) NewStreaming(ctx context.Context, threadID string, body BetaThreadRunNewParams, opts ...option.RequestOption) (stream *ssestream.Stream[AssistantStreamEvent]) { + var ( + raw *http.Response + err error + ) + opts = append(r.Options[:], opts...) + opts = append([]option.RequestOption{option.WithJSONSet("stream", true)}, opts...) + if threadID == "" { + err = errors.New("missing required thread_id parameter") + return + } + path := fmt.Sprintf("threads/%s/runs", threadID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &raw, opts...) + return ssestream.NewStream[AssistantStreamEvent](ssestream.NewDecoder(raw), err) +} + +// Retrieves a run. +func (r *BetaThreadRunService) Get(ctx context.Context, threadID string, runID string, opts ...option.RequestOption) (res *Run, err error) { + opts = append(r.Options[:], opts...) + if threadID == "" { + err = errors.New("missing required thread_id parameter") + return + } + if runID == "" { + err = errors.New("missing required run_id parameter") + return + } + path := fmt.Sprintf("threads/%s/runs/%s", threadID, runID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +// Modifies a run. +func (r *BetaThreadRunService) Update(ctx context.Context, threadID string, runID string, body BetaThreadRunUpdateParams, opts ...option.RequestOption) (res *Run, err error) { + opts = append(r.Options[:], opts...) + if threadID == "" { + err = errors.New("missing required thread_id parameter") + return + } + if runID == "" { + err = errors.New("missing required run_id parameter") + return + } + path := fmt.Sprintf("threads/%s/runs/%s", threadID, runID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Returns a list of runs belonging to a thread. 
+func (r *BetaThreadRunService) List(ctx context.Context, threadID string, query BetaThreadRunListParams, opts ...option.RequestOption) (res *pagination.CursorPage[Run], err error) {
+	// Capture the raw *http.Response so the cursor pager can derive the next
+	// page request from it; WithResponseInto must precede caller options so it
+	// is applied for this request.
+	var raw *http.Response
+	opts = append(r.Options[:], opts...)
+	opts = append([]option.RequestOption{option.WithResponseInto(&raw)}, opts...)
+	if threadID == "" {
+		err = errors.New("missing required thread_id parameter")
+		return
+	}
+	path := fmt.Sprintf("threads/%s/runs", threadID)
+	// Build the config first (rather than ExecuteNewRequest) because the page
+	// needs to retain it for follow-up requests via SetPageConfig below.
+	cfg, err := requestconfig.NewRequestConfig(ctx, http.MethodGet, path, query, &res, opts...)
+	if err != nil {
+		return nil, err
+	}
+	err = cfg.Execute()
+	if err != nil {
+		return nil, err
+	}
+	res.SetPageConfig(cfg, raw)
+	return res, nil
+}
+
+// Returns a list of runs belonging to a thread.
+func (r *BetaThreadRunService) ListAutoPaging(ctx context.Context, threadID string, query BetaThreadRunListParams, opts ...option.RequestOption) *pagination.CursorPageAutoPager[Run] {
+	// Wraps List in an auto-pager that fetches subsequent pages on demand; any
+	// error from List is reported by the pager during iteration.
+	return pagination.NewCursorPageAutoPager(r.List(ctx, threadID, query, opts...))
+}
+
+// Cancels a run that is `in_progress`.
+func (r *BetaThreadRunService) Cancel(ctx context.Context, threadID string, runID string, opts ...option.RequestOption) (res *Run, err error) {
+	opts = append(r.Options[:], opts...)
+	if threadID == "" {
+		err = errors.New("missing required thread_id parameter")
+		return
+	}
+	if runID == "" {
+		err = errors.New("missing required run_id parameter")
+		return
+	}
+	path := fmt.Sprintf("threads/%s/runs/%s/cancel", threadID, runID)
+	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, nil, &res, opts...)
+	return
+}
+
+// When a run has the `status: "requires_action"` and `required_action.type` is
+// `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
+// tool calls once they're all completed. All outputs must be submitted in a single
+// request.
+func (r *BetaThreadRunService) SubmitToolOutputs(ctx context.Context, threadID string, runID string, body BetaThreadRunSubmitToolOutputsParams, opts ...option.RequestOption) (res *Run, err error) { + opts = append(r.Options[:], opts...) + if threadID == "" { + err = errors.New("missing required thread_id parameter") + return + } + if runID == "" { + err = errors.New("missing required run_id parameter") + return + } + path := fmt.Sprintf("threads/%s/runs/%s/submit_tool_outputs", threadID, runID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// When a run has the `status: "requires_action"` and `required_action.type` is +// `submit_tool_outputs`, this endpoint can be used to submit the outputs from the +// tool calls once they're all completed. All outputs must be submitted in a single +// request. +func (r *BetaThreadRunService) SubmitToolOutputsStreaming(ctx context.Context, threadID string, runID string, body BetaThreadRunSubmitToolOutputsParams, opts ...option.RequestOption) (stream *ssestream.Stream[AssistantStreamEvent]) { + var ( + raw *http.Response + err error + ) + opts = append(r.Options[:], opts...) + opts = append([]option.RequestOption{option.WithJSONSet("stream", true)}, opts...) + if threadID == "" { + err = errors.New("missing required thread_id parameter") + return + } + if runID == "" { + err = errors.New("missing required run_id parameter") + return + } + path := fmt.Sprintf("threads/%s/runs/%s/submit_tool_outputs", threadID, runID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &raw, opts...) + return ssestream.NewStream[AssistantStreamEvent](ssestream.NewDecoder(raw), err) +} + +// Tool call objects +type RequiredActionFunctionToolCall struct { + // The ID of the tool call. 
This ID must be referenced when you submit the tool + // outputs in using the + // [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) + // endpoint. + ID string `json:"id,required"` + // The function definition. + Function RequiredActionFunctionToolCallFunction `json:"function,required"` + // The type of tool call the output is required for. For now, this is always + // `function`. + Type RequiredActionFunctionToolCallType `json:"type,required"` + JSON requiredActionFunctionToolCallJSON `json:"-"` +} + +// requiredActionFunctionToolCallJSON contains the JSON metadata for the struct +// [RequiredActionFunctionToolCall] +type requiredActionFunctionToolCallJSON struct { + ID apijson.Field + Function apijson.Field + Type apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *RequiredActionFunctionToolCall) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r requiredActionFunctionToolCallJSON) RawJSON() string { + return r.raw +} + +// The function definition. +type RequiredActionFunctionToolCallFunction struct { + // The arguments that the model expects you to pass to the function. + Arguments string `json:"arguments,required"` + // The name of the function. + Name string `json:"name,required"` + JSON requiredActionFunctionToolCallFunctionJSON `json:"-"` +} + +// requiredActionFunctionToolCallFunctionJSON contains the JSON metadata for the +// struct [RequiredActionFunctionToolCallFunction] +type requiredActionFunctionToolCallFunctionJSON struct { + Arguments apijson.Field + Name apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *RequiredActionFunctionToolCallFunction) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r requiredActionFunctionToolCallFunctionJSON) RawJSON() string { + return r.raw +} + +// The type of tool call the output is required for. 
For now, this is always +// `function`. +type RequiredActionFunctionToolCallType string + +const ( + RequiredActionFunctionToolCallTypeFunction RequiredActionFunctionToolCallType = "function" +) + +func (r RequiredActionFunctionToolCallType) IsKnown() bool { + switch r { + case RequiredActionFunctionToolCallTypeFunction: + return true + } + return false +} + +// Represents an execution run on a +// [thread](https://platform.openai.com/docs/api-reference/threads). +type Run struct { + // The identifier, which can be referenced in API endpoints. + ID string `json:"id,required"` + // The ID of the + // [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + // execution of this run. + AssistantID string `json:"assistant_id,required"` + // The Unix timestamp (in seconds) for when the run was cancelled. + CancelledAt int64 `json:"cancelled_at,required,nullable"` + // The Unix timestamp (in seconds) for when the run was completed. + CompletedAt int64 `json:"completed_at,required,nullable"` + // The Unix timestamp (in seconds) for when the run was created. + CreatedAt int64 `json:"created_at,required"` + // The Unix timestamp (in seconds) for when the run will expire. + ExpiresAt int64 `json:"expires_at,required,nullable"` + // The Unix timestamp (in seconds) for when the run failed. + FailedAt int64 `json:"failed_at,required,nullable"` + // Details on why the run is incomplete. Will be `null` if the run is not + // incomplete. + IncompleteDetails RunIncompleteDetails `json:"incomplete_details,required,nullable"` + // The instructions that the + // [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + // this run. + Instructions string `json:"instructions,required"` + // The last error associated with this run. Will be `null` if there are no errors. + LastError RunLastError `json:"last_error,required,nullable"` + // The maximum number of completion tokens specified to have been used over the + // course of the run. 
+ MaxCompletionTokens int64 `json:"max_completion_tokens,required,nullable"` + // The maximum number of prompt tokens specified to have been used over the course + // of the run. + MaxPromptTokens int64 `json:"max_prompt_tokens,required,nullable"` + // Set of 16 key-value pairs that can be attached to an object. This can be useful + // for storing additional information about the object in a structured format. Keys + // can be a maximum of 64 characters long and values can be a maxium of 512 + // characters long. + Metadata interface{} `json:"metadata,required,nullable"` + // The model that the + // [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + // this run. + Model string `json:"model,required"` + // The object type, which is always `thread.run`. + Object RunObject `json:"object,required"` + // Whether to enable + // [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + // during tool use. + ParallelToolCalls bool `json:"parallel_tool_calls,required"` + // Details on the action required to continue the run. Will be `null` if no action + // is required. + RequiredAction RunRequiredAction `json:"required_action,required,nullable"` + // Specifies the format that the model must output. Compatible with + // [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + // [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + // and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + // + // Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + // message the model generates is valid JSON. + // + // **Important:** when using JSON mode, you **must** also instruct the model to + // produce JSON yourself via a system or user message. Without this, the model may + // generate an unending stream of whitespace until the generation reaches the token + // limit, resulting in a long-running and seemingly "stuck" request. 
Also note that + // the message content may be partially cut off if `finish_reason="length"`, which + // indicates the generation exceeded `max_tokens` or the conversation exceeded the + // max context length. + ResponseFormat AssistantResponseFormatOptionUnion `json:"response_format,required,nullable"` + // The Unix timestamp (in seconds) for when the run was started. + StartedAt int64 `json:"started_at,required,nullable"` + // The status of the run, which can be either `queued`, `in_progress`, + // `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, + // `incomplete`, or `expired`. + Status RunStatus `json:"status,required"` + // The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + // that was executed on as a part of this run. + ThreadID string `json:"thread_id,required"` + // Controls which (if any) tool is called by the model. `none` means the model will + // not call any tools and instead generates a message. `auto` is the default value + // and means the model can pick between generating a message or calling one or more + // tools. `required` means the model must call one or more tools before responding + // to the user. Specifying a particular tool like `{"type": "file_search"}` or + // `{"type": "function", "function": {"name": "my_function"}}` forces the model to + // call that tool. + ToolChoice AssistantToolChoiceOptionUnion `json:"tool_choice,required,nullable"` + // The list of tools that the + // [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + // this run. + Tools []AssistantTool `json:"tools,required"` + // Controls for how a thread will be truncated prior to the run. Use this to + // control the intial context window of the run. + TruncationStrategy RunTruncationStrategy `json:"truncation_strategy,required,nullable"` + // Usage statistics related to the run. This value will be `null` if the run is not + // in a terminal state (i.e. `in_progress`, `queued`, etc.). 
+ Usage RunUsage `json:"usage,required,nullable"` + // The sampling temperature used for this run. If not set, defaults to 1. + Temperature float64 `json:"temperature,nullable"` + // The nucleus sampling value used for this run. If not set, defaults to 1. + TopP float64 `json:"top_p,nullable"` + JSON runJSON `json:"-"` +} + +// runJSON contains the JSON metadata for the struct [Run] +type runJSON struct { + ID apijson.Field + AssistantID apijson.Field + CancelledAt apijson.Field + CompletedAt apijson.Field + CreatedAt apijson.Field + ExpiresAt apijson.Field + FailedAt apijson.Field + IncompleteDetails apijson.Field + Instructions apijson.Field + LastError apijson.Field + MaxCompletionTokens apijson.Field + MaxPromptTokens apijson.Field + Metadata apijson.Field + Model apijson.Field + Object apijson.Field + ParallelToolCalls apijson.Field + RequiredAction apijson.Field + ResponseFormat apijson.Field + StartedAt apijson.Field + Status apijson.Field + ThreadID apijson.Field + ToolChoice apijson.Field + Tools apijson.Field + TruncationStrategy apijson.Field + Usage apijson.Field + Temperature apijson.Field + TopP apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *Run) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r runJSON) RawJSON() string { + return r.raw +} + +// Details on why the run is incomplete. Will be `null` if the run is not +// incomplete. +type RunIncompleteDetails struct { + // The reason why the run is incomplete. This will point to which specific token + // limit was reached over the course of the run. 
+ Reason RunIncompleteDetailsReason `json:"reason"` + JSON runIncompleteDetailsJSON `json:"-"` +} + +// runIncompleteDetailsJSON contains the JSON metadata for the struct +// [RunIncompleteDetails] +type runIncompleteDetailsJSON struct { + Reason apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *RunIncompleteDetails) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r runIncompleteDetailsJSON) RawJSON() string { + return r.raw +} + +// The reason why the run is incomplete. This will point to which specific token +// limit was reached over the course of the run. +type RunIncompleteDetailsReason string + +const ( + RunIncompleteDetailsReasonMaxCompletionTokens RunIncompleteDetailsReason = "max_completion_tokens" + RunIncompleteDetailsReasonMaxPromptTokens RunIncompleteDetailsReason = "max_prompt_tokens" +) + +func (r RunIncompleteDetailsReason) IsKnown() bool { + switch r { + case RunIncompleteDetailsReasonMaxCompletionTokens, RunIncompleteDetailsReasonMaxPromptTokens: + return true + } + return false +} + +// The last error associated with this run. Will be `null` if there are no errors. +type RunLastError struct { + // One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. + Code RunLastErrorCode `json:"code,required"` + // A human-readable description of the error. + Message string `json:"message,required"` + JSON runLastErrorJSON `json:"-"` +} + +// runLastErrorJSON contains the JSON metadata for the struct [RunLastError] +type runLastErrorJSON struct { + Code apijson.Field + Message apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *RunLastError) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r runLastErrorJSON) RawJSON() string { + return r.raw +} + +// One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. 
+type RunLastErrorCode string + +const ( + RunLastErrorCodeServerError RunLastErrorCode = "server_error" + RunLastErrorCodeRateLimitExceeded RunLastErrorCode = "rate_limit_exceeded" + RunLastErrorCodeInvalidPrompt RunLastErrorCode = "invalid_prompt" +) + +func (r RunLastErrorCode) IsKnown() bool { + switch r { + case RunLastErrorCodeServerError, RunLastErrorCodeRateLimitExceeded, RunLastErrorCodeInvalidPrompt: + return true + } + return false +} + +// The object type, which is always `thread.run`. +type RunObject string + +const ( + RunObjectThreadRun RunObject = "thread.run" +) + +func (r RunObject) IsKnown() bool { + switch r { + case RunObjectThreadRun: + return true + } + return false +} + +// Details on the action required to continue the run. Will be `null` if no action +// is required. +type RunRequiredAction struct { + // Details on the tool outputs needed for this run to continue. + SubmitToolOutputs RunRequiredActionSubmitToolOutputs `json:"submit_tool_outputs,required"` + // For now, this is always `submit_tool_outputs`. + Type RunRequiredActionType `json:"type,required"` + JSON runRequiredActionJSON `json:"-"` +} + +// runRequiredActionJSON contains the JSON metadata for the struct +// [RunRequiredAction] +type runRequiredActionJSON struct { + SubmitToolOutputs apijson.Field + Type apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *RunRequiredAction) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r runRequiredActionJSON) RawJSON() string { + return r.raw +} + +// Details on the tool outputs needed for this run to continue. +type RunRequiredActionSubmitToolOutputs struct { + // A list of the relevant tool calls. 
+ ToolCalls []RequiredActionFunctionToolCall `json:"tool_calls,required"` + JSON runRequiredActionSubmitToolOutputsJSON `json:"-"` +} + +// runRequiredActionSubmitToolOutputsJSON contains the JSON metadata for the struct +// [RunRequiredActionSubmitToolOutputs] +type runRequiredActionSubmitToolOutputsJSON struct { + ToolCalls apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *RunRequiredActionSubmitToolOutputs) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r runRequiredActionSubmitToolOutputsJSON) RawJSON() string { + return r.raw +} + +// For now, this is always `submit_tool_outputs`. +type RunRequiredActionType string + +const ( + RunRequiredActionTypeSubmitToolOutputs RunRequiredActionType = "submit_tool_outputs" +) + +func (r RunRequiredActionType) IsKnown() bool { + switch r { + case RunRequiredActionTypeSubmitToolOutputs: + return true + } + return false +} + +// Controls for how a thread will be truncated prior to the run. Use this to +// control the intial context window of the run. +type RunTruncationStrategy struct { + // The truncation strategy to use for the thread. The default is `auto`. If set to + // `last_messages`, the thread will be truncated to the n most recent messages in + // the thread. When set to `auto`, messages in the middle of the thread will be + // dropped to fit the context length of the model, `max_prompt_tokens`. + Type RunTruncationStrategyType `json:"type,required"` + // The number of most recent messages from the thread when constructing the context + // for the run. 
+ LastMessages int64 `json:"last_messages,nullable"` + JSON runTruncationStrategyJSON `json:"-"` +} + +// runTruncationStrategyJSON contains the JSON metadata for the struct +// [RunTruncationStrategy] +type runTruncationStrategyJSON struct { + Type apijson.Field + LastMessages apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *RunTruncationStrategy) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r runTruncationStrategyJSON) RawJSON() string { + return r.raw +} + +// The truncation strategy to use for the thread. The default is `auto`. If set to +// `last_messages`, the thread will be truncated to the n most recent messages in +// the thread. When set to `auto`, messages in the middle of the thread will be +// dropped to fit the context length of the model, `max_prompt_tokens`. +type RunTruncationStrategyType string + +const ( + RunTruncationStrategyTypeAuto RunTruncationStrategyType = "auto" + RunTruncationStrategyTypeLastMessages RunTruncationStrategyType = "last_messages" +) + +func (r RunTruncationStrategyType) IsKnown() bool { + switch r { + case RunTruncationStrategyTypeAuto, RunTruncationStrategyTypeLastMessages: + return true + } + return false +} + +// Usage statistics related to the run. This value will be `null` if the run is not +// in a terminal state (i.e. `in_progress`, `queued`, etc.). +type RunUsage struct { + // Number of completion tokens used over the course of the run. + CompletionTokens int64 `json:"completion_tokens,required"` + // Number of prompt tokens used over the course of the run. + PromptTokens int64 `json:"prompt_tokens,required"` + // Total number of tokens used (prompt + completion). 
+ TotalTokens int64 `json:"total_tokens,required"` + JSON runUsageJSON `json:"-"` +} + +// runUsageJSON contains the JSON metadata for the struct [RunUsage] +type runUsageJSON struct { + CompletionTokens apijson.Field + PromptTokens apijson.Field + TotalTokens apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *RunUsage) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r runUsageJSON) RawJSON() string { + return r.raw +} + +// The status of the run, which can be either `queued`, `in_progress`, +// `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, +// `incomplete`, or `expired`. +type RunStatus string + +const ( + RunStatusQueued RunStatus = "queued" + RunStatusInProgress RunStatus = "in_progress" + RunStatusRequiresAction RunStatus = "requires_action" + RunStatusCancelling RunStatus = "cancelling" + RunStatusCancelled RunStatus = "cancelled" + RunStatusFailed RunStatus = "failed" + RunStatusCompleted RunStatus = "completed" + RunStatusIncomplete RunStatus = "incomplete" + RunStatusExpired RunStatus = "expired" +) + +func (r RunStatus) IsKnown() bool { + switch r { + case RunStatusQueued, RunStatusInProgress, RunStatusRequiresAction, RunStatusCancelling, RunStatusCancelled, RunStatusFailed, RunStatusCompleted, RunStatusIncomplete, RunStatusExpired: + return true + } + return false +} + +type BetaThreadRunNewParams struct { + // The ID of the + // [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + // execute this run. + AssistantID param.Field[string] `json:"assistant_id,required"` + // Appends additional instructions at the end of the instructions for the run. This + // is useful for modifying the behavior on a per-run basis without overriding other + // instructions. + AdditionalInstructions param.Field[string] `json:"additional_instructions"` + // Adds additional messages to the thread before creating the run. 
+ AdditionalMessages param.Field[[]BetaThreadRunNewParamsAdditionalMessage] `json:"additional_messages"` + // Overrides the + // [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) + // of the assistant. This is useful for modifying the behavior on a per-run basis. + Instructions param.Field[string] `json:"instructions"` + // The maximum number of completion tokens that may be used over the course of the + // run. The run will make a best effort to use only the number of completion tokens + // specified, across multiple turns of the run. If the run exceeds the number of + // completion tokens specified, the run will end with status `incomplete`. See + // `incomplete_details` for more info. + MaxCompletionTokens param.Field[int64] `json:"max_completion_tokens"` + // The maximum number of prompt tokens that may be used over the course of the run. + // The run will make a best effort to use only the number of prompt tokens + // specified, across multiple turns of the run. If the run exceeds the number of + // prompt tokens specified, the run will end with status `incomplete`. See + // `incomplete_details` for more info. + MaxPromptTokens param.Field[int64] `json:"max_prompt_tokens"` + // Set of 16 key-value pairs that can be attached to an object. This can be useful + // for storing additional information about the object in a structured format. Keys + // can be a maximum of 64 characters long and values can be a maxium of 512 + // characters long. + Metadata param.Field[interface{}] `json:"metadata"` + // The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + // be used to execute this run. If a value is provided here, it will override the + // model associated with the assistant. If not, the model associated with the + // assistant will be used. 
+ Model param.Field[BetaThreadRunNewParamsModel] `json:"model"` + // Whether to enable + // [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + // during tool use. + ParallelToolCalls param.Field[bool] `json:"parallel_tool_calls"` + // Specifies the format that the model must output. Compatible with + // [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + // [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + // and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + // + // Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + // message the model generates is valid JSON. + // + // **Important:** when using JSON mode, you **must** also instruct the model to + // produce JSON yourself via a system or user message. Without this, the model may + // generate an unending stream of whitespace until the generation reaches the token + // limit, resulting in a long-running and seemingly "stuck" request. Also note that + // the message content may be partially cut off if `finish_reason="length"`, which + // indicates the generation exceeded `max_tokens` or the conversation exceeded the + // max context length. + ResponseFormat param.Field[AssistantResponseFormatOptionUnionParam] `json:"response_format"` + // What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + // make the output more random, while lower values like 0.2 will make it more + // focused and deterministic. + Temperature param.Field[float64] `json:"temperature"` + // Controls which (if any) tool is called by the model. `none` means the model will + // not call any tools and instead generates a message. `auto` is the default value + // and means the model can pick between generating a message or calling one or more + // tools. `required` means the model must call one or more tools before responding + // to the user. 
Specifying a particular tool like `{"type": "file_search"}` or + // `{"type": "function", "function": {"name": "my_function"}}` forces the model to + // call that tool. + ToolChoice param.Field[AssistantToolChoiceOptionUnionParam] `json:"tool_choice"` + // Override the tools the assistant can use for this run. This is useful for + // modifying the behavior on a per-run basis. + Tools param.Field[[]AssistantToolUnionParam] `json:"tools"` + // An alternative to sampling with temperature, called nucleus sampling, where the + // model considers the results of the tokens with top_p probability mass. So 0.1 + // means only the tokens comprising the top 10% probability mass are considered. + // + // We generally recommend altering this or temperature but not both. + TopP param.Field[float64] `json:"top_p"` + // Controls for how a thread will be truncated prior to the run. Use this to + // control the intial context window of the run. + TruncationStrategy param.Field[BetaThreadRunNewParamsTruncationStrategy] `json:"truncation_strategy"` +} + +func (r BetaThreadRunNewParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaThreadRunNewParamsAdditionalMessage struct { + // The text contents of the message. + Content param.Field[BetaThreadRunNewParamsAdditionalMessagesContentUnion] `json:"content,required"` + // The role of the entity that is creating the message. Allowed values include: + // + // - `user`: Indicates the message is sent by an actual user and should be used in + // most cases to represent user-generated messages. + // - `assistant`: Indicates the message is generated by the assistant. Use this + // value to insert messages from the assistant into the conversation. + Role param.Field[BetaThreadRunNewParamsAdditionalMessagesRole] `json:"role,required"` + // A list of files attached to the message, and the tools they should be added to. 
+ Attachments param.Field[[]BetaThreadRunNewParamsAdditionalMessagesAttachment] `json:"attachments"` + // Set of 16 key-value pairs that can be attached to an object. This can be useful + // for storing additional information about the object in a structured format. Keys + // can be a maximum of 64 characters long and values can be a maxium of 512 + // characters long. + Metadata param.Field[interface{}] `json:"metadata"` +} + +func (r BetaThreadRunNewParamsAdditionalMessage) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// The text contents of the message. +// +// Satisfied by [shared.UnionString], +// [BetaThreadRunNewParamsAdditionalMessagesContentArrayOfContentParts]. +type BetaThreadRunNewParamsAdditionalMessagesContentUnion interface { + ImplementsBetaThreadRunNewParamsAdditionalMessagesContentUnion() +} + +type BetaThreadRunNewParamsAdditionalMessagesContentArrayOfContentParts []MessageContentPartParamUnion + +func (r BetaThreadRunNewParamsAdditionalMessagesContentArrayOfContentParts) ImplementsBetaThreadRunNewParamsAdditionalMessagesContentUnion() { +} + +// The role of the entity that is creating the message. Allowed values include: +// +// - `user`: Indicates the message is sent by an actual user and should be used in +// most cases to represent user-generated messages. +// - `assistant`: Indicates the message is generated by the assistant. Use this +// value to insert messages from the assistant into the conversation. 
+type BetaThreadRunNewParamsAdditionalMessagesRole string + +const ( + BetaThreadRunNewParamsAdditionalMessagesRoleUser BetaThreadRunNewParamsAdditionalMessagesRole = "user" + BetaThreadRunNewParamsAdditionalMessagesRoleAssistant BetaThreadRunNewParamsAdditionalMessagesRole = "assistant" +) + +func (r BetaThreadRunNewParamsAdditionalMessagesRole) IsKnown() bool { + switch r { + case BetaThreadRunNewParamsAdditionalMessagesRoleUser, BetaThreadRunNewParamsAdditionalMessagesRoleAssistant: + return true + } + return false +} + +type BetaThreadRunNewParamsAdditionalMessagesAttachment struct { + // The ID of the file to attach to the message. + FileID param.Field[string] `json:"file_id"` + // The tools to add this file to. + Tools param.Field[[]BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolUnion] `json:"tools"` +} + +func (r BetaThreadRunNewParamsAdditionalMessagesAttachment) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaThreadRunNewParamsAdditionalMessagesAttachmentsTool struct { + // The type of tool being defined: `code_interpreter` + Type param.Field[BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsType] `json:"type,required"` +} + +func (r BetaThreadRunNewParamsAdditionalMessagesAttachmentsTool) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r BetaThreadRunNewParamsAdditionalMessagesAttachmentsTool) implementsBetaThreadRunNewParamsAdditionalMessagesAttachmentsToolUnion() { +} + +// Satisfied by [CodeInterpreterToolParam], +// [BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsFileSearch], +// [BetaThreadRunNewParamsAdditionalMessagesAttachmentsTool]. 
+type BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolUnion interface { + implementsBetaThreadRunNewParamsAdditionalMessagesAttachmentsToolUnion() +} + +type BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsFileSearch struct { + // The type of tool being defined: `file_search` + Type param.Field[BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsFileSearchType] `json:"type,required"` +} + +func (r BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsFileSearch) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsFileSearch) implementsBetaThreadRunNewParamsAdditionalMessagesAttachmentsToolUnion() { +} + +// The type of tool being defined: `file_search` +type BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsFileSearchType string + +const ( + BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsFileSearchTypeFileSearch BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsFileSearchType = "file_search" +) + +func (r BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsFileSearchType) IsKnown() bool { + switch r { + case BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsFileSearchTypeFileSearch: + return true + } + return false +} + +// The type of tool being defined: `code_interpreter` +type BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsType string + +const ( + BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsTypeCodeInterpreter BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsType = "code_interpreter" + BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsTypeFileSearch BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsType = "file_search" +) + +func (r BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsType) IsKnown() bool { + switch r { + case BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsTypeCodeInterpreter, BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsTypeFileSearch: + 
return true + } + return false +} + +type BetaThreadRunNewParamsModel string + +const ( + BetaThreadRunNewParamsModelGPT4o BetaThreadRunNewParamsModel = "gpt-4o" + BetaThreadRunNewParamsModelGPT4o2024_05_13 BetaThreadRunNewParamsModel = "gpt-4o-2024-05-13" + BetaThreadRunNewParamsModelGPT4oMini BetaThreadRunNewParamsModel = "gpt-4o-mini" + BetaThreadRunNewParamsModelGPT4oMini2024_07_18 BetaThreadRunNewParamsModel = "gpt-4o-mini-2024-07-18" + BetaThreadRunNewParamsModelGPT4Turbo BetaThreadRunNewParamsModel = "gpt-4-turbo" + BetaThreadRunNewParamsModelGPT4Turbo2024_04_09 BetaThreadRunNewParamsModel = "gpt-4-turbo-2024-04-09" + BetaThreadRunNewParamsModelGPT4_0125Preview BetaThreadRunNewParamsModel = "gpt-4-0125-preview" + BetaThreadRunNewParamsModelGPT4TurboPreview BetaThreadRunNewParamsModel = "gpt-4-turbo-preview" + BetaThreadRunNewParamsModelGPT4_1106Preview BetaThreadRunNewParamsModel = "gpt-4-1106-preview" + BetaThreadRunNewParamsModelGPT4VisionPreview BetaThreadRunNewParamsModel = "gpt-4-vision-preview" + BetaThreadRunNewParamsModelGPT4 BetaThreadRunNewParamsModel = "gpt-4" + BetaThreadRunNewParamsModelGPT4_0314 BetaThreadRunNewParamsModel = "gpt-4-0314" + BetaThreadRunNewParamsModelGPT4_0613 BetaThreadRunNewParamsModel = "gpt-4-0613" + BetaThreadRunNewParamsModelGPT4_32k BetaThreadRunNewParamsModel = "gpt-4-32k" + BetaThreadRunNewParamsModelGPT4_32k0314 BetaThreadRunNewParamsModel = "gpt-4-32k-0314" + BetaThreadRunNewParamsModelGPT4_32k0613 BetaThreadRunNewParamsModel = "gpt-4-32k-0613" + BetaThreadRunNewParamsModelGPT3_5Turbo BetaThreadRunNewParamsModel = "gpt-3.5-turbo" + BetaThreadRunNewParamsModelGPT3_5Turbo16k BetaThreadRunNewParamsModel = "gpt-3.5-turbo-16k" + BetaThreadRunNewParamsModelGPT3_5Turbo0613 BetaThreadRunNewParamsModel = "gpt-3.5-turbo-0613" + BetaThreadRunNewParamsModelGPT3_5Turbo1106 BetaThreadRunNewParamsModel = "gpt-3.5-turbo-1106" + BetaThreadRunNewParamsModelGPT3_5Turbo0125 BetaThreadRunNewParamsModel = "gpt-3.5-turbo-0125" + 
BetaThreadRunNewParamsModelGPT3_5Turbo16k0613 BetaThreadRunNewParamsModel = "gpt-3.5-turbo-16k-0613" +) + +func (r BetaThreadRunNewParamsModel) IsKnown() bool { + switch r { + case BetaThreadRunNewParamsModelGPT4o, BetaThreadRunNewParamsModelGPT4o2024_05_13, BetaThreadRunNewParamsModelGPT4oMini, BetaThreadRunNewParamsModelGPT4oMini2024_07_18, BetaThreadRunNewParamsModelGPT4Turbo, BetaThreadRunNewParamsModelGPT4Turbo2024_04_09, BetaThreadRunNewParamsModelGPT4_0125Preview, BetaThreadRunNewParamsModelGPT4TurboPreview, BetaThreadRunNewParamsModelGPT4_1106Preview, BetaThreadRunNewParamsModelGPT4VisionPreview, BetaThreadRunNewParamsModelGPT4, BetaThreadRunNewParamsModelGPT4_0314, BetaThreadRunNewParamsModelGPT4_0613, BetaThreadRunNewParamsModelGPT4_32k, BetaThreadRunNewParamsModelGPT4_32k0314, BetaThreadRunNewParamsModelGPT4_32k0613, BetaThreadRunNewParamsModelGPT3_5Turbo, BetaThreadRunNewParamsModelGPT3_5Turbo16k, BetaThreadRunNewParamsModelGPT3_5Turbo0613, BetaThreadRunNewParamsModelGPT3_5Turbo1106, BetaThreadRunNewParamsModelGPT3_5Turbo0125, BetaThreadRunNewParamsModelGPT3_5Turbo16k0613: + return true + } + return false +} + +// Controls for how a thread will be truncated prior to the run. Use this to +// control the intial context window of the run. +type BetaThreadRunNewParamsTruncationStrategy struct { + // The truncation strategy to use for the thread. The default is `auto`. If set to + // `last_messages`, the thread will be truncated to the n most recent messages in + // the thread. When set to `auto`, messages in the middle of the thread will be + // dropped to fit the context length of the model, `max_prompt_tokens`. + Type param.Field[BetaThreadRunNewParamsTruncationStrategyType] `json:"type,required"` + // The number of most recent messages from the thread when constructing the context + // for the run. 
+ LastMessages param.Field[int64] `json:"last_messages"` +} + +func (r BetaThreadRunNewParamsTruncationStrategy) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// The truncation strategy to use for the thread. The default is `auto`. If set to +// `last_messages`, the thread will be truncated to the n most recent messages in +// the thread. When set to `auto`, messages in the middle of the thread will be +// dropped to fit the context length of the model, `max_prompt_tokens`. +type BetaThreadRunNewParamsTruncationStrategyType string + +const ( + BetaThreadRunNewParamsTruncationStrategyTypeAuto BetaThreadRunNewParamsTruncationStrategyType = "auto" + BetaThreadRunNewParamsTruncationStrategyTypeLastMessages BetaThreadRunNewParamsTruncationStrategyType = "last_messages" +) + +func (r BetaThreadRunNewParamsTruncationStrategyType) IsKnown() bool { + switch r { + case BetaThreadRunNewParamsTruncationStrategyTypeAuto, BetaThreadRunNewParamsTruncationStrategyTypeLastMessages: + return true + } + return false +} + +type BetaThreadRunUpdateParams struct { + // Set of 16 key-value pairs that can be attached to an object. This can be useful + // for storing additional information about the object in a structured format. Keys + // can be a maximum of 64 characters long and values can be a maxium of 512 + // characters long. + Metadata param.Field[interface{}] `json:"metadata"` +} + +func (r BetaThreadRunUpdateParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaThreadRunListParams struct { + // A cursor for use in pagination. `after` is an object ID that defines your place + // in the list. For instance, if you make a list request and receive 100 objects, + // ending with obj_foo, your subsequent call can include after=obj_foo in order to + // fetch the next page of the list. + After param.Field[string] `query:"after"` + // A cursor for use in pagination. 
`before` is an object ID that defines your place + // in the list. For instance, if you make a list request and receive 100 objects, + // ending with obj_foo, your subsequent call can include before=obj_foo in order to + // fetch the previous page of the list. + Before param.Field[string] `query:"before"` + // A limit on the number of objects to be returned. Limit can range between 1 and + // 100, and the default is 20. + Limit param.Field[int64] `query:"limit"` + // Sort order by the `created_at` timestamp of the objects. `asc` for ascending + // order and `desc` for descending order. + Order param.Field[BetaThreadRunListParamsOrder] `query:"order"` +} + +// URLQuery serializes [BetaThreadRunListParams]'s query parameters as +// `url.Values`. +func (r BetaThreadRunListParams) URLQuery() (v url.Values) { + return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ + ArrayFormat: apiquery.ArrayQueryFormatComma, + NestedFormat: apiquery.NestedQueryFormatBrackets, + }) +} + +// Sort order by the `created_at` timestamp of the objects. `asc` for ascending +// order and `desc` for descending order. +type BetaThreadRunListParamsOrder string + +const ( + BetaThreadRunListParamsOrderAsc BetaThreadRunListParamsOrder = "asc" + BetaThreadRunListParamsOrderDesc BetaThreadRunListParamsOrder = "desc" +) + +func (r BetaThreadRunListParamsOrder) IsKnown() bool { + switch r { + case BetaThreadRunListParamsOrderAsc, BetaThreadRunListParamsOrderDesc: + return true + } + return false +} + +type BetaThreadRunSubmitToolOutputsParams struct { + // A list of tools for which the outputs are being submitted. + ToolOutputs param.Field[[]BetaThreadRunSubmitToolOutputsParamsToolOutput] `json:"tool_outputs,required"` +} + +func (r BetaThreadRunSubmitToolOutputsParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type BetaThreadRunSubmitToolOutputsParamsToolOutput struct { + // The output of the tool call to be submitted to continue the run. 
+ Output param.Field[string] `json:"output"` + // The ID of the tool call in the `required_action` object within the run object + // the output is being submitted for. + ToolCallID param.Field[string] `json:"tool_call_id"` +} + +func (r BetaThreadRunSubmitToolOutputsParamsToolOutput) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} diff --git a/betathreadrun_test.go b/betathreadrun_test.go new file mode 100644 index 0000000..791e429 --- /dev/null +++ b/betathreadrun_test.go @@ -0,0 +1,312 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" + "github.com/openai/openai-go/shared" +) + +func TestBetaThreadRunNewWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Threads.Runs.New( + context.TODO(), + "thread_id", + openai.BetaThreadRunNewParams{ + AssistantID: openai.F("assistant_id"), + AdditionalInstructions: openai.F("additional_instructions"), + AdditionalMessages: openai.F([]openai.BetaThreadRunNewParamsAdditionalMessage{{ + Role: openai.F(openai.BetaThreadRunNewParamsAdditionalMessagesRoleUser), + Content: openai.F[openai.BetaThreadRunNewParamsAdditionalMessagesContentUnion](shared.UnionString("string")), + Attachments: openai.F([]openai.BetaThreadRunNewParamsAdditionalMessagesAttachment{{ + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, 
openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }, { + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }, { + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }}), + Metadata: openai.F[any](map[string]interface{}{}), + }, { + Role: openai.F(openai.BetaThreadRunNewParamsAdditionalMessagesRoleUser), + Content: openai.F[openai.BetaThreadRunNewParamsAdditionalMessagesContentUnion](shared.UnionString("string")), + Attachments: openai.F([]openai.BetaThreadRunNewParamsAdditionalMessagesAttachment{{ + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }, { + FileID: openai.F("file_id"), + Tools: 
openai.F([]openai.BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }, { + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }}), + Metadata: openai.F[any](map[string]interface{}{}), + }, { + Role: openai.F(openai.BetaThreadRunNewParamsAdditionalMessagesRoleUser), + Content: openai.F[openai.BetaThreadRunNewParamsAdditionalMessagesContentUnion](shared.UnionString("string")), + Attachments: openai.F([]openai.BetaThreadRunNewParamsAdditionalMessagesAttachment{{ + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }, { + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: 
openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }, { + FileID: openai.F("file_id"), + Tools: openai.F([]openai.BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolUnion{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + }}), + Metadata: openai.F[any](map[string]interface{}{}), + }}), + Instructions: openai.F("instructions"), + MaxCompletionTokens: openai.F(int64(256)), + MaxPromptTokens: openai.F(int64(256)), + Metadata: openai.F[any](map[string]interface{}{}), + Model: openai.F(openai.BetaThreadRunNewParamsModelGPT4o), + ParallelToolCalls: openai.F(true), + ResponseFormat: openai.F[openai.AssistantResponseFormatOptionUnionParam](openai.AssistantResponseFormatOptionString(openai.AssistantResponseFormatOptionStringNone)), + Temperature: openai.F(1.000000), + ToolChoice: openai.F[openai.AssistantToolChoiceOptionUnionParam](openai.AssistantToolChoiceOptionString(openai.AssistantToolChoiceOptionStringNone)), + Tools: openai.F([]openai.AssistantToolUnionParam{openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }, openai.CodeInterpreterToolParam{ + Type: openai.F(openai.CodeInterpreterToolTypeCodeInterpreter), + }}), + TopP: openai.F(1.000000), + TruncationStrategy: openai.F(openai.BetaThreadRunNewParamsTruncationStrategy{ + Type: openai.F(openai.BetaThreadRunNewParamsTruncationStrategyTypeAuto), + LastMessages: openai.F(int64(1)), + }), + }, + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func 
TestBetaThreadRunGet(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Threads.Runs.Get( + context.TODO(), + "thread_id", + "run_id", + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaThreadRunUpdateWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Threads.Runs.Update( + context.TODO(), + "thread_id", + "run_id", + openai.BetaThreadRunUpdateParams{ + Metadata: openai.F[any](map[string]interface{}{}), + }, + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaThreadRunListWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Threads.Runs.List( + context.TODO(), + "thread_id", + openai.BetaThreadRunListParams{ + After: openai.F("after"), + Before: openai.F("before"), + Limit: openai.F(int64(0)), + Order: openai.F(openai.BetaThreadRunListParamsOrderAsc), + }, + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + 
t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaThreadRunCancel(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Threads.Runs.Cancel( + context.TODO(), + "thread_id", + "run_id", + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaThreadRunSubmitToolOutputsWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Threads.Runs.SubmitToolOutputs( + context.TODO(), + "thread_id", + "run_id", + openai.BetaThreadRunSubmitToolOutputsParams{ + ToolOutputs: openai.F([]openai.BetaThreadRunSubmitToolOutputsParamsToolOutput{{ + ToolCallID: openai.F("tool_call_id"), + Output: openai.F("output"), + }, { + ToolCallID: openai.F("tool_call_id"), + Output: openai.F("output"), + }, { + ToolCallID: openai.F("tool_call_id"), + Output: openai.F("output"), + }}), + }, + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/betathreadrunstep.go b/betathreadrunstep.go new file mode 100644 index 0000000..e383e4e --- /dev/null +++ b/betathreadrunstep.go @@ -0,0 +1,1800 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package openai + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "reflect" + + "github.com/openai/openai-go/internal/apijson" + "github.com/openai/openai-go/internal/apiquery" + "github.com/openai/openai-go/internal/pagination" + "github.com/openai/openai-go/internal/param" + "github.com/openai/openai-go/internal/requestconfig" + "github.com/openai/openai-go/option" + "github.com/tidwall/gjson" +) + +// BetaThreadRunStepService contains methods and other services that help with +// interacting with the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewBetaThreadRunStepService] method instead. +type BetaThreadRunStepService struct { + Options []option.RequestOption +} + +// NewBetaThreadRunStepService generates a new service that applies the given +// options to each request. These options are applied after the parent client's +// options (if there is one), and before any request-specific options. +func NewBetaThreadRunStepService(opts ...option.RequestOption) (r *BetaThreadRunStepService) { + r = &BetaThreadRunStepService{} + r.Options = opts + return +} + +// Retrieves a run step. +func (r *BetaThreadRunStepService) Get(ctx context.Context, threadID string, runID string, stepID string, opts ...option.RequestOption) (res *RunStep, err error) { + opts = append(r.Options[:], opts...) + if threadID == "" { + err = errors.New("missing required thread_id parameter") + return + } + if runID == "" { + err = errors.New("missing required run_id parameter") + return + } + if stepID == "" { + err = errors.New("missing required step_id parameter") + return + } + path := fmt.Sprintf("threads/%s/runs/%s/steps/%s", threadID, runID, stepID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +// Returns a list of run steps belonging to a run. 
+func (r *BetaThreadRunStepService) List(ctx context.Context, threadID string, runID string, query BetaThreadRunStepListParams, opts ...option.RequestOption) (res *pagination.CursorPage[RunStep], err error) { + var raw *http.Response + opts = append(r.Options[:], opts...) + opts = append([]option.RequestOption{option.WithResponseInto(&raw)}, opts...) + if threadID == "" { + err = errors.New("missing required thread_id parameter") + return + } + if runID == "" { + err = errors.New("missing required run_id parameter") + return + } + path := fmt.Sprintf("threads/%s/runs/%s/steps", threadID, runID) + cfg, err := requestconfig.NewRequestConfig(ctx, http.MethodGet, path, query, &res, opts...) + if err != nil { + return nil, err + } + err = cfg.Execute() + if err != nil { + return nil, err + } + res.SetPageConfig(cfg, raw) + return res, nil +} + +// Returns a list of run steps belonging to a run. +func (r *BetaThreadRunStepService) ListAutoPaging(ctx context.Context, threadID string, runID string, query BetaThreadRunStepListParams, opts ...option.RequestOption) *pagination.CursorPageAutoPager[RunStep] { + return pagination.NewCursorPageAutoPager(r.List(ctx, threadID, runID, query, opts...)) +} + +// Text output from the Code Interpreter tool call as part of a run step. +type CodeInterpreterLogs struct { + // The index of the output in the outputs array. + Index int64 `json:"index,required"` + // Always `logs`. + Type CodeInterpreterLogsType `json:"type,required"` + // The text output from the Code Interpreter tool call. 
	Logs string                  `json:"logs"`
	JSON codeInterpreterLogsJSON `json:"-"`
}

// codeInterpreterLogsJSON contains the JSON metadata for the struct
// [CodeInterpreterLogs]
type codeInterpreterLogsJSON struct {
	Index       apijson.Field
	Type        apijson.Field
	Logs        apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *CodeInterpreterLogs) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the raw JSON this struct was decoded from.
func (r codeInterpreterLogsJSON) RawJSON() string {
	return r.raw
}

// Marks CodeInterpreterLogs as a valid variant of the delta code-interpreter
// output union.
func (r CodeInterpreterLogs) implementsCodeInterpreterToolCallDeltaCodeInterpreterOutput() {}

// Always `logs`.
type CodeInterpreterLogsType string

const (
	CodeInterpreterLogsTypeLogs CodeInterpreterLogsType = "logs"
)

// IsKnown reports whether r is one of the enum values known to this SDK version.
func (r CodeInterpreterLogsType) IsKnown() bool {
	switch r {
	case CodeInterpreterLogsTypeLogs:
		return true
	}
	return false
}

type CodeInterpreterOutputImage struct {
	// The index of the output in the outputs array.
	Index int64 `json:"index,required"`
	// Always `image`.
	Type  CodeInterpreterOutputImageType  `json:"type,required"`
	Image CodeInterpreterOutputImageImage `json:"image"`
	JSON  codeInterpreterOutputImageJSON  `json:"-"`
}

// codeInterpreterOutputImageJSON contains the JSON metadata for the struct
// [CodeInterpreterOutputImage]
type codeInterpreterOutputImageJSON struct {
	Index       apijson.Field
	Type        apijson.Field
	Image       apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *CodeInterpreterOutputImage) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the raw JSON this struct was decoded from.
func (r codeInterpreterOutputImageJSON) RawJSON() string {
	return r.raw
}

// Marks CodeInterpreterOutputImage as a valid variant of the delta
// code-interpreter output union.
func (r CodeInterpreterOutputImage) implementsCodeInterpreterToolCallDeltaCodeInterpreterOutput() {}

// Always `image`.
type CodeInterpreterOutputImageType string

const (
	CodeInterpreterOutputImageTypeImage CodeInterpreterOutputImageType = "image"
)

// IsKnown reports whether r is one of the enum values known to this SDK version.
func (r CodeInterpreterOutputImageType) IsKnown() bool {
	switch r {
	case CodeInterpreterOutputImageTypeImage:
		return true
	}
	return false
}

type CodeInterpreterOutputImageImage struct {
	// The [file](https://platform.openai.com/docs/api-reference/files) ID of the
	// image.
	FileID string                              `json:"file_id"`
	JSON   codeInterpreterOutputImageImageJSON `json:"-"`
}

// codeInterpreterOutputImageImageJSON contains the JSON metadata for the struct
// [CodeInterpreterOutputImageImage]
type codeInterpreterOutputImageImageJSON struct {
	FileID      apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *CodeInterpreterOutputImageImage) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the raw JSON this struct was decoded from.
func (r codeInterpreterOutputImageImageJSON) RawJSON() string {
	return r.raw
}

// Details of the Code Interpreter tool call the run step was involved in.
type CodeInterpreterToolCall struct {
	// The ID of the tool call.
	ID string `json:"id,required"`
	// The Code Interpreter tool call definition.
	CodeInterpreter CodeInterpreterToolCallCodeInterpreter `json:"code_interpreter,required"`
	// The type of tool call. This is always going to be `code_interpreter` for this
	// type of tool call.
	Type CodeInterpreterToolCallType `json:"type,required"`
	JSON codeInterpreterToolCallJSON `json:"-"`
}

// codeInterpreterToolCallJSON contains the JSON metadata for the struct
// [CodeInterpreterToolCall]
type codeInterpreterToolCallJSON struct {
	ID              apijson.Field
	CodeInterpreter apijson.Field
	Type            apijson.Field
	raw             string
	ExtraFields     map[string]apijson.Field
}

func (r *CodeInterpreterToolCall) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the raw JSON this struct was decoded from.
func (r codeInterpreterToolCallJSON) RawJSON() string {
	return r.raw
}

// Marks CodeInterpreterToolCall as a valid variant of the tool-call union.
func (r CodeInterpreterToolCall) implementsToolCall() {}

// The Code Interpreter tool call definition.
type CodeInterpreterToolCallCodeInterpreter struct {
	// The input to the Code Interpreter tool call.
	Input string `json:"input,required"`
	// The outputs from the Code Interpreter tool call. Code Interpreter can output one
	// or more items, including text (`logs`) or images (`image`). Each of these are
	// represented by a different object type.
	Outputs []CodeInterpreterToolCallCodeInterpreterOutput `json:"outputs,required"`
	JSON    codeInterpreterToolCallCodeInterpreterJSON     `json:"-"`
}

// codeInterpreterToolCallCodeInterpreterJSON contains the JSON metadata for the
// struct [CodeInterpreterToolCallCodeInterpreter]
type codeInterpreterToolCallCodeInterpreterJSON struct {
	Input       apijson.Field
	Outputs     apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *CodeInterpreterToolCallCodeInterpreter) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the raw JSON this struct was decoded from.
func (r codeInterpreterToolCallCodeInterpreterJSON) RawJSON() string {
	return r.raw
}

// Text output from the Code Interpreter tool call as part of a run step.
//
// This is a flattened view over the discriminated union of output variants; the
// concrete variant is available via [AsUnion].
type CodeInterpreterToolCallCodeInterpreterOutput struct {
	// Always `logs`.
	Type CodeInterpreterToolCallCodeInterpreterOutputsType `json:"type,required"`
	// The text output from the Code Interpreter tool call.
	Logs string `json:"logs"`
	// This field can have the runtime type of
	// [CodeInterpreterToolCallCodeInterpreterOutputsImageImage].
	Image       interface{}                                      `json:"image,required"`
	JSON        codeInterpreterToolCallCodeInterpreterOutputJSON `json:"-"`
	union       CodeInterpreterToolCallCodeInterpreterOutputsUnion
}

// codeInterpreterToolCallCodeInterpreterOutputJSON contains the JSON metadata for
// the struct [CodeInterpreterToolCallCodeInterpreterOutput]
type codeInterpreterToolCallCodeInterpreterOutputJSON struct {
	Type        apijson.Field
	Logs        apijson.Field
	Image       apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

// RawJSON returns the raw JSON this struct was decoded from.
func (r codeInterpreterToolCallCodeInterpreterOutputJSON) RawJSON() string {
	return r.raw
}

func (r *CodeInterpreterToolCallCodeInterpreterOutput) UnmarshalJSON(data []byte) (err error) {
	*r = CodeInterpreterToolCallCodeInterpreterOutput{}
	// Decode into the union first, then mirror the variant's fields onto this
	// flattened wrapper via apijson.Port.
	err = apijson.UnmarshalRoot(data, &r.union)
	if err != nil {
		return err
	}
	return apijson.Port(r.union, &r)
}

// AsUnion returns a [CodeInterpreterToolCallCodeInterpreterOutputsUnion] interface
// which you can cast to the specific types for more type safety.
//
// Possible runtime types of the union are
// [CodeInterpreterToolCallCodeInterpreterOutputsLogs],
// [CodeInterpreterToolCallCodeInterpreterOutputsImage].
func (r CodeInterpreterToolCallCodeInterpreterOutput) AsUnion() CodeInterpreterToolCallCodeInterpreterOutputsUnion {
	return r.union
}

// Text output from the Code Interpreter tool call as part of a run step.
//
// Union satisfied by [CodeInterpreterToolCallCodeInterpreterOutputsLogs] or
// [CodeInterpreterToolCallCodeInterpreterOutputsImage].
type CodeInterpreterToolCallCodeInterpreterOutputsUnion interface {
	implementsCodeInterpreterToolCallCodeInterpreterOutput()
}

// Registers the union's variants with the apijson decoder, keyed on the "type"
// discriminator field.
func init() {
	apijson.RegisterUnion(
		reflect.TypeOf((*CodeInterpreterToolCallCodeInterpreterOutputsUnion)(nil)).Elem(),
		"type",
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(CodeInterpreterToolCallCodeInterpreterOutputsLogs{}),
			DiscriminatorValue: "logs",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(CodeInterpreterToolCallCodeInterpreterOutputsImage{}),
			DiscriminatorValue: "image",
		},
	)
}

// Text output from the Code Interpreter tool call as part of a run step.
type CodeInterpreterToolCallCodeInterpreterOutputsLogs struct {
	// The text output from the Code Interpreter tool call.
	Logs string `json:"logs,required"`
	// Always `logs`.
	Type CodeInterpreterToolCallCodeInterpreterOutputsLogsType `json:"type,required"`
	JSON codeInterpreterToolCallCodeInterpreterOutputsLogsJSON `json:"-"`
}

// codeInterpreterToolCallCodeInterpreterOutputsLogsJSON contains the JSON metadata
// for the struct [CodeInterpreterToolCallCodeInterpreterOutputsLogs]
type codeInterpreterToolCallCodeInterpreterOutputsLogsJSON struct {
	Logs        apijson.Field
	Type        apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *CodeInterpreterToolCallCodeInterpreterOutputsLogs) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the raw JSON this struct was decoded from.
func (r codeInterpreterToolCallCodeInterpreterOutputsLogsJSON) RawJSON() string {
	return r.raw
}

func (r CodeInterpreterToolCallCodeInterpreterOutputsLogs) implementsCodeInterpreterToolCallCodeInterpreterOutput() {
}

// Always `logs`.
type CodeInterpreterToolCallCodeInterpreterOutputsLogsType string

const (
	CodeInterpreterToolCallCodeInterpreterOutputsLogsTypeLogs CodeInterpreterToolCallCodeInterpreterOutputsLogsType = "logs"
)

// IsKnown reports whether r is one of the enum values known to this SDK version.
func (r CodeInterpreterToolCallCodeInterpreterOutputsLogsType) IsKnown() bool {
	switch r {
	case CodeInterpreterToolCallCodeInterpreterOutputsLogsTypeLogs:
		return true
	}
	return false
}

type CodeInterpreterToolCallCodeInterpreterOutputsImage struct {
	Image CodeInterpreterToolCallCodeInterpreterOutputsImageImage `json:"image,required"`
	// Always `image`.
	Type CodeInterpreterToolCallCodeInterpreterOutputsImageType `json:"type,required"`
	JSON codeInterpreterToolCallCodeInterpreterOutputsImageJSON `json:"-"`
}

// codeInterpreterToolCallCodeInterpreterOutputsImageJSON contains the JSON
// metadata for the struct [CodeInterpreterToolCallCodeInterpreterOutputsImage]
type codeInterpreterToolCallCodeInterpreterOutputsImageJSON struct {
	Image       apijson.Field
	Type        apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *CodeInterpreterToolCallCodeInterpreterOutputsImage) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the raw JSON this struct was decoded from.
func (r codeInterpreterToolCallCodeInterpreterOutputsImageJSON) RawJSON() string {
	return r.raw
}

func (r CodeInterpreterToolCallCodeInterpreterOutputsImage) implementsCodeInterpreterToolCallCodeInterpreterOutput() {
}

type CodeInterpreterToolCallCodeInterpreterOutputsImageImage struct {
	// The [file](https://platform.openai.com/docs/api-reference/files) ID of the
	// image.
	FileID string                                                      `json:"file_id,required"`
	JSON   codeInterpreterToolCallCodeInterpreterOutputsImageImageJSON `json:"-"`
}

// codeInterpreterToolCallCodeInterpreterOutputsImageImageJSON contains the JSON
// metadata for the struct
// [CodeInterpreterToolCallCodeInterpreterOutputsImageImage]
type codeInterpreterToolCallCodeInterpreterOutputsImageImageJSON struct {
	FileID      apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *CodeInterpreterToolCallCodeInterpreterOutputsImageImage) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the raw JSON this struct was decoded from.
func (r codeInterpreterToolCallCodeInterpreterOutputsImageImageJSON) RawJSON() string {
	return r.raw
}

// Always `image`.
type CodeInterpreterToolCallCodeInterpreterOutputsImageType string

const (
	CodeInterpreterToolCallCodeInterpreterOutputsImageTypeImage CodeInterpreterToolCallCodeInterpreterOutputsImageType = "image"
)

// IsKnown reports whether r is one of the enum values known to this SDK version.
func (r CodeInterpreterToolCallCodeInterpreterOutputsImageType) IsKnown() bool {
	switch r {
	case CodeInterpreterToolCallCodeInterpreterOutputsImageTypeImage:
		return true
	}
	return false
}

// Always `logs`.
type CodeInterpreterToolCallCodeInterpreterOutputsType string

const (
	CodeInterpreterToolCallCodeInterpreterOutputsTypeLogs  CodeInterpreterToolCallCodeInterpreterOutputsType = "logs"
	CodeInterpreterToolCallCodeInterpreterOutputsTypeImage CodeInterpreterToolCallCodeInterpreterOutputsType = "image"
)

// IsKnown reports whether r is one of the enum values known to this SDK version.
func (r CodeInterpreterToolCallCodeInterpreterOutputsType) IsKnown() bool {
	switch r {
	case CodeInterpreterToolCallCodeInterpreterOutputsTypeLogs, CodeInterpreterToolCallCodeInterpreterOutputsTypeImage:
		return true
	}
	return false
}

// The type of tool call. This is always going to be `code_interpreter` for this
// type of tool call.
type CodeInterpreterToolCallType string

const (
	CodeInterpreterToolCallTypeCodeInterpreter CodeInterpreterToolCallType = "code_interpreter"
)

// IsKnown reports whether r is one of the enum values known to this SDK version.
func (r CodeInterpreterToolCallType) IsKnown() bool {
	switch r {
	case CodeInterpreterToolCallTypeCodeInterpreter:
		return true
	}
	return false
}

// Details of the Code Interpreter tool call the run step was involved in.
type CodeInterpreterToolCallDelta struct {
	// The index of the tool call in the tool calls array.
	Index int64 `json:"index,required"`
	// The type of tool call. This is always going to be `code_interpreter` for this
	// type of tool call.
	Type CodeInterpreterToolCallDeltaType `json:"type,required"`
	// The ID of the tool call.
	ID string `json:"id"`
	// The Code Interpreter tool call definition.
	CodeInterpreter CodeInterpreterToolCallDeltaCodeInterpreter `json:"code_interpreter"`
	JSON            codeInterpreterToolCallDeltaJSON            `json:"-"`
}

// codeInterpreterToolCallDeltaJSON contains the JSON metadata for the struct
// [CodeInterpreterToolCallDelta]
type codeInterpreterToolCallDeltaJSON struct {
	Index           apijson.Field
	Type            apijson.Field
	ID              apijson.Field
	CodeInterpreter apijson.Field
	raw             string
	ExtraFields     map[string]apijson.Field
}

func (r *CodeInterpreterToolCallDelta) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the raw JSON this struct was decoded from.
func (r codeInterpreterToolCallDeltaJSON) RawJSON() string {
	return r.raw
}

// Marks CodeInterpreterToolCallDelta as a valid variant of the tool-call-delta union.
func (r CodeInterpreterToolCallDelta) implementsToolCallDelta() {}

// The type of tool call. This is always going to be `code_interpreter` for this
// type of tool call.
type CodeInterpreterToolCallDeltaType string

const (
	CodeInterpreterToolCallDeltaTypeCodeInterpreter CodeInterpreterToolCallDeltaType = "code_interpreter"
)

// IsKnown reports whether r is one of the enum values known to this SDK version.
func (r CodeInterpreterToolCallDeltaType) IsKnown() bool {
	switch r {
	case CodeInterpreterToolCallDeltaTypeCodeInterpreter:
		return true
	}
	return false
}

// The Code Interpreter tool call definition.
type CodeInterpreterToolCallDeltaCodeInterpreter struct {
	// The input to the Code Interpreter tool call.
	Input string `json:"input"`
	// The outputs from the Code Interpreter tool call. Code Interpreter can output one
	// or more items, including text (`logs`) or images (`image`). Each of these are
	// represented by a different object type.
	Outputs []CodeInterpreterToolCallDeltaCodeInterpreterOutput `json:"outputs"`
	JSON    codeInterpreterToolCallDeltaCodeInterpreterJSON     `json:"-"`
}

// codeInterpreterToolCallDeltaCodeInterpreterJSON contains the JSON metadata for
// the struct [CodeInterpreterToolCallDeltaCodeInterpreter]
type codeInterpreterToolCallDeltaCodeInterpreterJSON struct {
	Input       apijson.Field
	Outputs     apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *CodeInterpreterToolCallDeltaCodeInterpreter) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the raw JSON this struct was decoded from.
func (r codeInterpreterToolCallDeltaCodeInterpreterJSON) RawJSON() string {
	return r.raw
}

// Text output from the Code Interpreter tool call as part of a run step.
//
// This is a flattened view over the discriminated union of delta output
// variants; the concrete variant is available via [AsUnion].
type CodeInterpreterToolCallDeltaCodeInterpreterOutput struct {
	// The index of the output in the outputs array.
	Index int64 `json:"index,required"`
	// Always `logs`.
	Type CodeInterpreterToolCallDeltaCodeInterpreterOutputsType `json:"type,required"`
	// The text output from the Code Interpreter tool call.
	Logs string `json:"logs"`
	// This field can have the runtime type of [CodeInterpreterOutputImageImage].
	Image interface{}                                           `json:"image,required"`
	JSON  codeInterpreterToolCallDeltaCodeInterpreterOutputJSON `json:"-"`
	union CodeInterpreterToolCallDeltaCodeInterpreterOutputsUnion
}

// codeInterpreterToolCallDeltaCodeInterpreterOutputJSON contains the JSON metadata
// for the struct [CodeInterpreterToolCallDeltaCodeInterpreterOutput]
type codeInterpreterToolCallDeltaCodeInterpreterOutputJSON struct {
	Index       apijson.Field
	Type        apijson.Field
	Logs        apijson.Field
	Image       apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

// RawJSON returns the raw JSON this struct was decoded from.
func (r codeInterpreterToolCallDeltaCodeInterpreterOutputJSON) RawJSON() string {
	return r.raw
}

func (r *CodeInterpreterToolCallDeltaCodeInterpreterOutput) UnmarshalJSON(data []byte) (err error) {
	*r = CodeInterpreterToolCallDeltaCodeInterpreterOutput{}
	// Decode into the union first, then mirror the variant's fields onto this
	// flattened wrapper via apijson.Port.
	err = apijson.UnmarshalRoot(data, &r.union)
	if err != nil {
		return err
	}
	return apijson.Port(r.union, &r)
}

// AsUnion returns a [CodeInterpreterToolCallDeltaCodeInterpreterOutputsUnion]
// interface which you can cast to the specific types for more type safety.
//
// Possible runtime types of the union are [CodeInterpreterLogs],
// [CodeInterpreterOutputImage].
func (r CodeInterpreterToolCallDeltaCodeInterpreterOutput) AsUnion() CodeInterpreterToolCallDeltaCodeInterpreterOutputsUnion {
	return r.union
}

// Text output from the Code Interpreter tool call as part of a run step.
//
// Union satisfied by [CodeInterpreterLogs] or [CodeInterpreterOutputImage].
type CodeInterpreterToolCallDeltaCodeInterpreterOutputsUnion interface {
	implementsCodeInterpreterToolCallDeltaCodeInterpreterOutput()
}

// Registers the delta output union's variants with the apijson decoder, keyed
// on the "type" discriminator field.
func init() {
	apijson.RegisterUnion(
		reflect.TypeOf((*CodeInterpreterToolCallDeltaCodeInterpreterOutputsUnion)(nil)).Elem(),
		"type",
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(CodeInterpreterLogs{}),
			DiscriminatorValue: "logs",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(CodeInterpreterOutputImage{}),
			DiscriminatorValue: "image",
		},
	)
}

// Always `logs`.
type CodeInterpreterToolCallDeltaCodeInterpreterOutputsType string

const (
	CodeInterpreterToolCallDeltaCodeInterpreterOutputsTypeLogs  CodeInterpreterToolCallDeltaCodeInterpreterOutputsType = "logs"
	CodeInterpreterToolCallDeltaCodeInterpreterOutputsTypeImage CodeInterpreterToolCallDeltaCodeInterpreterOutputsType = "image"
)

// IsKnown reports whether r is one of the enum values known to this SDK version.
func (r CodeInterpreterToolCallDeltaCodeInterpreterOutputsType) IsKnown() bool {
	switch r {
	case CodeInterpreterToolCallDeltaCodeInterpreterOutputsTypeLogs, CodeInterpreterToolCallDeltaCodeInterpreterOutputsTypeImage:
		return true
	}
	return false
}

type FileSearchToolCall struct {
	// The ID of the tool call object.
	ID string `json:"id,required"`
	// For now, this is always going to be an empty object.
	FileSearch interface{} `json:"file_search,required"`
	// The type of tool call. This is always going to be `file_search` for this type of
	// tool call.
	Type FileSearchToolCallType `json:"type,required"`
	JSON fileSearchToolCallJSON `json:"-"`
}

// fileSearchToolCallJSON contains the JSON metadata for the struct
// [FileSearchToolCall]
type fileSearchToolCallJSON struct {
	ID          apijson.Field
	FileSearch  apijson.Field
	Type        apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *FileSearchToolCall) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the raw JSON this struct was decoded from.
func (r fileSearchToolCallJSON) RawJSON() string {
	return r.raw
}

// Marks FileSearchToolCall as a valid variant of the tool-call union.
func (r FileSearchToolCall) implementsToolCall() {}

// The type of tool call. This is always going to be `file_search` for this type of
// tool call.
type FileSearchToolCallType string

const (
	FileSearchToolCallTypeFileSearch FileSearchToolCallType = "file_search"
)

// IsKnown reports whether r is one of the enum values known to this SDK version.
func (r FileSearchToolCallType) IsKnown() bool {
	switch r {
	case FileSearchToolCallTypeFileSearch:
		return true
	}
	return false
}

type FileSearchToolCallDelta struct {
	// For now, this is always going to be an empty object.
	FileSearch interface{} `json:"file_search,required"`
	// The index of the tool call in the tool calls array.
	Index int64 `json:"index,required"`
	// The type of tool call. This is always going to be `file_search` for this type of
	// tool call.
	Type FileSearchToolCallDeltaType `json:"type,required"`
	// The ID of the tool call object.
	ID   string                      `json:"id"`
	JSON fileSearchToolCallDeltaJSON `json:"-"`
}

// fileSearchToolCallDeltaJSON contains the JSON metadata for the struct
// [FileSearchToolCallDelta]
type fileSearchToolCallDeltaJSON struct {
	FileSearch  apijson.Field
	Index       apijson.Field
	Type        apijson.Field
	ID          apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *FileSearchToolCallDelta) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the raw JSON this struct was decoded from.
func (r fileSearchToolCallDeltaJSON) RawJSON() string {
	return r.raw
}

// Marks FileSearchToolCallDelta as a valid variant of the tool-call-delta union.
func (r FileSearchToolCallDelta) implementsToolCallDelta() {}

// The type of tool call. This is always going to be `file_search` for this type of
// tool call.
type FileSearchToolCallDeltaType string

const (
	FileSearchToolCallDeltaTypeFileSearch FileSearchToolCallDeltaType = "file_search"
)

// IsKnown reports whether r is one of the enum values known to this SDK version.
func (r FileSearchToolCallDeltaType) IsKnown() bool {
	switch r {
	case FileSearchToolCallDeltaTypeFileSearch:
		return true
	}
	return false
}

type FunctionToolCall struct {
	// The ID of the tool call object.
	ID string `json:"id,required"`
	// The definition of the function that was called.
	Function FunctionToolCallFunction `json:"function,required"`
	// The type of tool call. This is always going to be `function` for this type of
	// tool call.
	Type FunctionToolCallType `json:"type,required"`
	JSON functionToolCallJSON `json:"-"`
}

// functionToolCallJSON contains the JSON metadata for the struct
// [FunctionToolCall]
type functionToolCallJSON struct {
	ID          apijson.Field
	Function    apijson.Field
	Type        apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

func (r *FunctionToolCall) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the raw JSON this struct was decoded from.
func (r functionToolCallJSON) RawJSON() string {
	return r.raw
}

// Marks FunctionToolCall as a valid variant of the tool-call union.
func (r FunctionToolCall) implementsToolCall() {}

// The definition of the function that was called.
+type FunctionToolCallFunction struct { + // The arguments passed to the function. + Arguments string `json:"arguments,required"` + // The name of the function. + Name string `json:"name,required"` + // The output of the function. This will be `null` if the outputs have not been + // [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) + // yet. + Output string `json:"output,required,nullable"` + JSON functionToolCallFunctionJSON `json:"-"` +} + +// functionToolCallFunctionJSON contains the JSON metadata for the struct +// [FunctionToolCallFunction] +type functionToolCallFunctionJSON struct { + Arguments apijson.Field + Name apijson.Field + Output apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FunctionToolCallFunction) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r functionToolCallFunctionJSON) RawJSON() string { + return r.raw +} + +// The type of tool call. This is always going to be `function` for this type of +// tool call. +type FunctionToolCallType string + +const ( + FunctionToolCallTypeFunction FunctionToolCallType = "function" +) + +func (r FunctionToolCallType) IsKnown() bool { + switch r { + case FunctionToolCallTypeFunction: + return true + } + return false +} + +type FunctionToolCallDelta struct { + // The index of the tool call in the tool calls array. + Index int64 `json:"index,required"` + // The type of tool call. This is always going to be `function` for this type of + // tool call. + Type FunctionToolCallDeltaType `json:"type,required"` + // The ID of the tool call object. + ID string `json:"id"` + // The definition of the function that was called. 
+ Function FunctionToolCallDeltaFunction `json:"function"` + JSON functionToolCallDeltaJSON `json:"-"` +} + +// functionToolCallDeltaJSON contains the JSON metadata for the struct +// [FunctionToolCallDelta] +type functionToolCallDeltaJSON struct { + Index apijson.Field + Type apijson.Field + ID apijson.Field + Function apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FunctionToolCallDelta) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r functionToolCallDeltaJSON) RawJSON() string { + return r.raw +} + +func (r FunctionToolCallDelta) implementsToolCallDelta() {} + +// The type of tool call. This is always going to be `function` for this type of +// tool call. +type FunctionToolCallDeltaType string + +const ( + FunctionToolCallDeltaTypeFunction FunctionToolCallDeltaType = "function" +) + +func (r FunctionToolCallDeltaType) IsKnown() bool { + switch r { + case FunctionToolCallDeltaTypeFunction: + return true + } + return false +} + +// The definition of the function that was called. +type FunctionToolCallDeltaFunction struct { + // The arguments passed to the function. + Arguments string `json:"arguments"` + // The name of the function. + Name string `json:"name"` + // The output of the function. This will be `null` if the outputs have not been + // [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) + // yet. 
+ Output string `json:"output,nullable"` + JSON functionToolCallDeltaFunctionJSON `json:"-"` +} + +// functionToolCallDeltaFunctionJSON contains the JSON metadata for the struct +// [FunctionToolCallDeltaFunction] +type functionToolCallDeltaFunctionJSON struct { + Arguments apijson.Field + Name apijson.Field + Output apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FunctionToolCallDeltaFunction) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r functionToolCallDeltaFunctionJSON) RawJSON() string { + return r.raw +} + +// Details of the message creation by the run step. +type MessageCreationStepDetails struct { + MessageCreation MessageCreationStepDetailsMessageCreation `json:"message_creation,required"` + // Always `message_creation`. + Type MessageCreationStepDetailsType `json:"type,required"` + JSON messageCreationStepDetailsJSON `json:"-"` +} + +// messageCreationStepDetailsJSON contains the JSON metadata for the struct +// [MessageCreationStepDetails] +type messageCreationStepDetailsJSON struct { + MessageCreation apijson.Field + Type apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *MessageCreationStepDetails) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r messageCreationStepDetailsJSON) RawJSON() string { + return r.raw +} + +func (r MessageCreationStepDetails) implementsRunStepStepDetails() {} + +type MessageCreationStepDetailsMessageCreation struct { + // The ID of the message that was created by this run step. 
+ MessageID string `json:"message_id,required"` + JSON messageCreationStepDetailsMessageCreationJSON `json:"-"` +} + +// messageCreationStepDetailsMessageCreationJSON contains the JSON metadata for the +// struct [MessageCreationStepDetailsMessageCreation] +type messageCreationStepDetailsMessageCreationJSON struct { + MessageID apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *MessageCreationStepDetailsMessageCreation) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r messageCreationStepDetailsMessageCreationJSON) RawJSON() string { + return r.raw +} + +// Always `message_creation`. +type MessageCreationStepDetailsType string + +const ( + MessageCreationStepDetailsTypeMessageCreation MessageCreationStepDetailsType = "message_creation" +) + +func (r MessageCreationStepDetailsType) IsKnown() bool { + switch r { + case MessageCreationStepDetailsTypeMessageCreation: + return true + } + return false +} + +// Represents a step in execution of a run. +type RunStep struct { + // The identifier of the run step, which can be referenced in API endpoints. + ID string `json:"id,required"` + // The ID of the + // [assistant](https://platform.openai.com/docs/api-reference/assistants) + // associated with the run step. + AssistantID string `json:"assistant_id,required"` + // The Unix timestamp (in seconds) for when the run step was cancelled. + CancelledAt int64 `json:"cancelled_at,required,nullable"` + // The Unix timestamp (in seconds) for when the run step completed. + CompletedAt int64 `json:"completed_at,required,nullable"` + // The Unix timestamp (in seconds) for when the run step was created. + CreatedAt int64 `json:"created_at,required"` + // The Unix timestamp (in seconds) for when the run step expired. A step is + // considered expired if the parent run is expired. + ExpiredAt int64 `json:"expired_at,required,nullable"` + // The Unix timestamp (in seconds) for when the run step failed. 
+ FailedAt int64 `json:"failed_at,required,nullable"` + // The last error associated with this run step. Will be `null` if there are no + // errors. + LastError RunStepLastError `json:"last_error,required,nullable"` + // Set of 16 key-value pairs that can be attached to an object. This can be useful + // for storing additional information about the object in a structured format. Keys + // can be a maximum of 64 characters long and values can be a maxium of 512 + // characters long. + Metadata interface{} `json:"metadata,required,nullable"` + // The object type, which is always `thread.run.step`. + Object RunStepObject `json:"object,required"` + // The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that + // this run step is a part of. + RunID string `json:"run_id,required"` + // The status of the run step, which can be either `in_progress`, `cancelled`, + // `failed`, `completed`, or `expired`. + Status RunStepStatus `json:"status,required"` + // The details of the run step. + StepDetails RunStepStepDetails `json:"step_details,required"` + // The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + // that was run. + ThreadID string `json:"thread_id,required"` + // The type of run step, which can be either `message_creation` or `tool_calls`. + Type RunStepType `json:"type,required"` + // Usage statistics related to the run step. This value will be `null` while the + // run step's status is `in_progress`. 
+ Usage RunStepUsage `json:"usage,required,nullable"` + JSON runStepJSON `json:"-"` +} + +// runStepJSON contains the JSON metadata for the struct [RunStep] +type runStepJSON struct { + ID apijson.Field + AssistantID apijson.Field + CancelledAt apijson.Field + CompletedAt apijson.Field + CreatedAt apijson.Field + ExpiredAt apijson.Field + FailedAt apijson.Field + LastError apijson.Field + Metadata apijson.Field + Object apijson.Field + RunID apijson.Field + Status apijson.Field + StepDetails apijson.Field + ThreadID apijson.Field + Type apijson.Field + Usage apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *RunStep) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r runStepJSON) RawJSON() string { + return r.raw +} + +// The last error associated with this run step. Will be `null` if there are no +// errors. +type RunStepLastError struct { + // One of `server_error` or `rate_limit_exceeded`. + Code RunStepLastErrorCode `json:"code,required"` + // A human-readable description of the error. + Message string `json:"message,required"` + JSON runStepLastErrorJSON `json:"-"` +} + +// runStepLastErrorJSON contains the JSON metadata for the struct +// [RunStepLastError] +type runStepLastErrorJSON struct { + Code apijson.Field + Message apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *RunStepLastError) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r runStepLastErrorJSON) RawJSON() string { + return r.raw +} + +// One of `server_error` or `rate_limit_exceeded`. 
type RunStepLastErrorCode string

const (
	RunStepLastErrorCodeServerError       RunStepLastErrorCode = "server_error"
	RunStepLastErrorCodeRateLimitExceeded RunStepLastErrorCode = "rate_limit_exceeded"
)

// IsKnown reports whether r matches one of the constants declared above. It
// returns false for the zero value and for any new code the API may add later.
func (r RunStepLastErrorCode) IsKnown() bool {
	switch r {
	case RunStepLastErrorCodeServerError, RunStepLastErrorCodeRateLimitExceeded:
		return true
	}
	return false
}

// The object type, which is always `thread.run.step`.
type RunStepObject string

const (
	RunStepObjectThreadRunStep RunStepObject = "thread.run.step"
)

// IsKnown reports whether r matches one of the constants declared above.
func (r RunStepObject) IsKnown() bool {
	switch r {
	case RunStepObjectThreadRunStep:
		return true
	}
	return false
}

// The status of the run step, which can be either `in_progress`, `cancelled`,
// `failed`, `completed`, or `expired`.
type RunStepStatus string

const (
	RunStepStatusInProgress RunStepStatus = "in_progress"
	RunStepStatusCancelled  RunStepStatus = "cancelled"
	RunStepStatusFailed     RunStepStatus = "failed"
	RunStepStatusCompleted  RunStepStatus = "completed"
	RunStepStatusExpired    RunStepStatus = "expired"
)

// IsKnown reports whether r matches one of the constants declared above.
func (r RunStepStatus) IsKnown() bool {
	switch r {
	case RunStepStatusInProgress, RunStepStatusCancelled, RunStepStatusFailed, RunStepStatusCompleted, RunStepStatusExpired:
		return true
	}
	return false
}

// The details of the run step.
//
// This struct is a flattened view over the two possible variants; use
// [RunStepStepDetails.AsUnion] to recover the concrete variant type.
type RunStepStepDetails struct {
	// Always `message_creation`.
	Type RunStepStepDetailsType `json:"type,required"`
	// This field can have the runtime type of
	// [MessageCreationStepDetailsMessageCreation].
	MessageCreation interface{} `json:"message_creation,required"`
	// This field can have the runtime type of [[]ToolCall].
	ToolCalls interface{}            `json:"tool_calls,required"`
	JSON      runStepStepDetailsJSON `json:"-"`
	union     RunStepStepDetailsUnion
}

// runStepStepDetailsJSON contains the JSON metadata for the struct
// [RunStepStepDetails]
type runStepStepDetailsJSON struct {
	Type            apijson.Field
	MessageCreation apijson.Field
	ToolCalls       apijson.Field
	raw             string
	ExtraFields     map[string]apijson.Field
}

// RawJSON returns the raw JSON this struct was decoded from.
func (r runStepStepDetailsJSON) RawJSON() string {
	return r.raw
}

// UnmarshalJSON first decodes into the registered union variant (selected by
// the "type" discriminator; see the init below), then ports the variant's
// fields back onto the flattened struct r.
func (r *RunStepStepDetails) UnmarshalJSON(data []byte) (err error) {
	*r = RunStepStepDetails{}
	err = apijson.UnmarshalRoot(data, &r.union)
	if err != nil {
		return err
	}
	return apijson.Port(r.union, &r)
}

// AsUnion returns a [RunStepStepDetailsUnion] interface which you can cast to the
// specific types for more type safety.
//
// Possible runtime types of the union are [MessageCreationStepDetails],
// [ToolCallsStepDetails].
func (r RunStepStepDetails) AsUnion() RunStepStepDetailsUnion {
	return r.union
}

// The details of the run step.
//
// Union satisfied by [MessageCreationStepDetails] or [ToolCallsStepDetails].
type RunStepStepDetailsUnion interface {
	implementsRunStepStepDetails()
}

// init registers the union's variants so UnmarshalRoot can pick the concrete
// type from the JSON "type" discriminator field.
func init() {
	apijson.RegisterUnion(
		reflect.TypeOf((*RunStepStepDetailsUnion)(nil)).Elem(),
		"type",
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(MessageCreationStepDetails{}),
			DiscriminatorValue: "message_creation",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(ToolCallsStepDetails{}),
			DiscriminatorValue: "tool_calls",
		},
	)
}

// Always `message_creation`.
+type RunStepStepDetailsType string + +const ( + RunStepStepDetailsTypeMessageCreation RunStepStepDetailsType = "message_creation" + RunStepStepDetailsTypeToolCalls RunStepStepDetailsType = "tool_calls" +) + +func (r RunStepStepDetailsType) IsKnown() bool { + switch r { + case RunStepStepDetailsTypeMessageCreation, RunStepStepDetailsTypeToolCalls: + return true + } + return false +} + +// The type of run step, which can be either `message_creation` or `tool_calls`. +type RunStepType string + +const ( + RunStepTypeMessageCreation RunStepType = "message_creation" + RunStepTypeToolCalls RunStepType = "tool_calls" +) + +func (r RunStepType) IsKnown() bool { + switch r { + case RunStepTypeMessageCreation, RunStepTypeToolCalls: + return true + } + return false +} + +// Usage statistics related to the run step. This value will be `null` while the +// run step's status is `in_progress`. +type RunStepUsage struct { + // Number of completion tokens used over the course of the run step. + CompletionTokens int64 `json:"completion_tokens,required"` + // Number of prompt tokens used over the course of the run step. + PromptTokens int64 `json:"prompt_tokens,required"` + // Total number of tokens used (prompt + completion). + TotalTokens int64 `json:"total_tokens,required"` + JSON runStepUsageJSON `json:"-"` +} + +// runStepUsageJSON contains the JSON metadata for the struct [RunStepUsage] +type runStepUsageJSON struct { + CompletionTokens apijson.Field + PromptTokens apijson.Field + TotalTokens apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *RunStepUsage) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r runStepUsageJSON) RawJSON() string { + return r.raw +} + +// The delta containing the fields that have changed on the run step. +type RunStepDelta struct { + // The details of the run step. 
+ StepDetails RunStepDeltaStepDetails `json:"step_details"` + JSON runStepDeltaJSON `json:"-"` +} + +// runStepDeltaJSON contains the JSON metadata for the struct [RunStepDelta] +type runStepDeltaJSON struct { + StepDetails apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *RunStepDelta) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r runStepDeltaJSON) RawJSON() string { + return r.raw +} + +// The details of the run step. +type RunStepDeltaStepDetails struct { + // Always `message_creation`. + Type RunStepDeltaStepDetailsType `json:"type,required"` + // This field can have the runtime type of + // [RunStepDeltaMessageDeltaMessageCreation]. + MessageCreation interface{} `json:"message_creation,required"` + // This field can have the runtime type of [[]ToolCallDelta]. + ToolCalls interface{} `json:"tool_calls,required"` + JSON runStepDeltaStepDetailsJSON `json:"-"` + union RunStepDeltaStepDetailsUnion +} + +// runStepDeltaStepDetailsJSON contains the JSON metadata for the struct +// [RunStepDeltaStepDetails] +type runStepDeltaStepDetailsJSON struct { + Type apijson.Field + MessageCreation apijson.Field + ToolCalls apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r runStepDeltaStepDetailsJSON) RawJSON() string { + return r.raw +} + +func (r *RunStepDeltaStepDetails) UnmarshalJSON(data []byte) (err error) { + *r = RunStepDeltaStepDetails{} + err = apijson.UnmarshalRoot(data, &r.union) + if err != nil { + return err + } + return apijson.Port(r.union, &r) +} + +// AsUnion returns a [RunStepDeltaStepDetailsUnion] interface which you can cast to +// the specific types for more type safety. +// +// Possible runtime types of the union are [RunStepDeltaMessageDelta], +// [ToolCallDeltaObject]. +func (r RunStepDeltaStepDetails) AsUnion() RunStepDeltaStepDetailsUnion { + return r.union +} + +// The details of the run step. 
+// +// Union satisfied by [RunStepDeltaMessageDelta] or [ToolCallDeltaObject]. +type RunStepDeltaStepDetailsUnion interface { + implementsRunStepDeltaStepDetails() +} + +func init() { + apijson.RegisterUnion( + reflect.TypeOf((*RunStepDeltaStepDetailsUnion)(nil)).Elem(), + "type", + apijson.UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(RunStepDeltaMessageDelta{}), + DiscriminatorValue: "message_creation", + }, + apijson.UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(ToolCallDeltaObject{}), + DiscriminatorValue: "tool_calls", + }, + ) +} + +// Always `message_creation`. +type RunStepDeltaStepDetailsType string + +const ( + RunStepDeltaStepDetailsTypeMessageCreation RunStepDeltaStepDetailsType = "message_creation" + RunStepDeltaStepDetailsTypeToolCalls RunStepDeltaStepDetailsType = "tool_calls" +) + +func (r RunStepDeltaStepDetailsType) IsKnown() bool { + switch r { + case RunStepDeltaStepDetailsTypeMessageCreation, RunStepDeltaStepDetailsTypeToolCalls: + return true + } + return false +} + +// Represents a run step delta i.e. any changed fields on a run step during +// streaming. +type RunStepDeltaEvent struct { + // The identifier of the run step, which can be referenced in API endpoints. + ID string `json:"id,required"` + // The delta containing the fields that have changed on the run step. + Delta RunStepDelta `json:"delta,required"` + // The object type, which is always `thread.run.step.delta`. 
+ Object RunStepDeltaEventObject `json:"object,required"` + JSON runStepDeltaEventJSON `json:"-"` +} + +// runStepDeltaEventJSON contains the JSON metadata for the struct +// [RunStepDeltaEvent] +type runStepDeltaEventJSON struct { + ID apijson.Field + Delta apijson.Field + Object apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *RunStepDeltaEvent) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r runStepDeltaEventJSON) RawJSON() string { + return r.raw +} + +// The object type, which is always `thread.run.step.delta`. +type RunStepDeltaEventObject string + +const ( + RunStepDeltaEventObjectThreadRunStepDelta RunStepDeltaEventObject = "thread.run.step.delta" +) + +func (r RunStepDeltaEventObject) IsKnown() bool { + switch r { + case RunStepDeltaEventObjectThreadRunStepDelta: + return true + } + return false +} + +// Details of the message creation by the run step. +type RunStepDeltaMessageDelta struct { + // Always `message_creation`. + Type RunStepDeltaMessageDeltaType `json:"type,required"` + MessageCreation RunStepDeltaMessageDeltaMessageCreation `json:"message_creation"` + JSON runStepDeltaMessageDeltaJSON `json:"-"` +} + +// runStepDeltaMessageDeltaJSON contains the JSON metadata for the struct +// [RunStepDeltaMessageDelta] +type runStepDeltaMessageDeltaJSON struct { + Type apijson.Field + MessageCreation apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *RunStepDeltaMessageDelta) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r runStepDeltaMessageDeltaJSON) RawJSON() string { + return r.raw +} + +func (r RunStepDeltaMessageDelta) implementsRunStepDeltaStepDetails() {} + +// Always `message_creation`. 
+type RunStepDeltaMessageDeltaType string + +const ( + RunStepDeltaMessageDeltaTypeMessageCreation RunStepDeltaMessageDeltaType = "message_creation" +) + +func (r RunStepDeltaMessageDeltaType) IsKnown() bool { + switch r { + case RunStepDeltaMessageDeltaTypeMessageCreation: + return true + } + return false +} + +type RunStepDeltaMessageDeltaMessageCreation struct { + // The ID of the message that was created by this run step. + MessageID string `json:"message_id"` + JSON runStepDeltaMessageDeltaMessageCreationJSON `json:"-"` +} + +// runStepDeltaMessageDeltaMessageCreationJSON contains the JSON metadata for the +// struct [RunStepDeltaMessageDeltaMessageCreation] +type runStepDeltaMessageDeltaMessageCreationJSON struct { + MessageID apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *RunStepDeltaMessageDeltaMessageCreation) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r runStepDeltaMessageDeltaMessageCreationJSON) RawJSON() string { + return r.raw +} + +// Details of the Code Interpreter tool call the run step was involved in. +type ToolCall struct { + // The ID of the tool call. + ID string `json:"id,required"` + // The type of tool call. This is always going to be `code_interpreter` for this + // type of tool call. + Type ToolCallType `json:"type,required"` + // This field can have the runtime type of + // [CodeInterpreterToolCallCodeInterpreter]. + CodeInterpreter interface{} `json:"code_interpreter,required"` + // This field can have the runtime type of [interface{}]. + FileSearch interface{} `json:"file_search,required"` + // This field can have the runtime type of [FunctionToolCallFunction]. 
+ Function interface{} `json:"function,required"` + JSON toolCallJSON `json:"-"` + union ToolCallUnion +} + +// toolCallJSON contains the JSON metadata for the struct [ToolCall] +type toolCallJSON struct { + ID apijson.Field + Type apijson.Field + CodeInterpreter apijson.Field + FileSearch apijson.Field + Function apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r toolCallJSON) RawJSON() string { + return r.raw +} + +func (r *ToolCall) UnmarshalJSON(data []byte) (err error) { + *r = ToolCall{} + err = apijson.UnmarshalRoot(data, &r.union) + if err != nil { + return err + } + return apijson.Port(r.union, &r) +} + +// AsUnion returns a [ToolCallUnion] interface which you can cast to the specific +// types for more type safety. +// +// Possible runtime types of the union are [CodeInterpreterToolCall], +// [FileSearchToolCall], [FunctionToolCall]. +func (r ToolCall) AsUnion() ToolCallUnion { + return r.union +} + +// Details of the Code Interpreter tool call the run step was involved in. +// +// Union satisfied by [CodeInterpreterToolCall], [FileSearchToolCall] or +// [FunctionToolCall]. +type ToolCallUnion interface { + implementsToolCall() +} + +func init() { + apijson.RegisterUnion( + reflect.TypeOf((*ToolCallUnion)(nil)).Elem(), + "type", + apijson.UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(CodeInterpreterToolCall{}), + DiscriminatorValue: "code_interpreter", + }, + apijson.UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(FileSearchToolCall{}), + DiscriminatorValue: "file_search", + }, + apijson.UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(FunctionToolCall{}), + DiscriminatorValue: "function", + }, + ) +} + +// The type of tool call. This is always going to be `code_interpreter` for this +// type of tool call. 
+type ToolCallType string + +const ( + ToolCallTypeCodeInterpreter ToolCallType = "code_interpreter" + ToolCallTypeFileSearch ToolCallType = "file_search" + ToolCallTypeFunction ToolCallType = "function" +) + +func (r ToolCallType) IsKnown() bool { + switch r { + case ToolCallTypeCodeInterpreter, ToolCallTypeFileSearch, ToolCallTypeFunction: + return true + } + return false +} + +// Details of the Code Interpreter tool call the run step was involved in. +type ToolCallDelta struct { + // The index of the tool call in the tool calls array. + Index int64 `json:"index,required"` + // The ID of the tool call. + ID string `json:"id"` + // The type of tool call. This is always going to be `code_interpreter` for this + // type of tool call. + Type ToolCallDeltaType `json:"type,required"` + // This field can have the runtime type of + // [CodeInterpreterToolCallDeltaCodeInterpreter]. + CodeInterpreter interface{} `json:"code_interpreter,required"` + // This field can have the runtime type of [interface{}]. + FileSearch interface{} `json:"file_search,required"` + // This field can have the runtime type of [FunctionToolCallDeltaFunction]. + Function interface{} `json:"function,required"` + JSON toolCallDeltaJSON `json:"-"` + union ToolCallDeltaUnion +} + +// toolCallDeltaJSON contains the JSON metadata for the struct [ToolCallDelta] +type toolCallDeltaJSON struct { + Index apijson.Field + ID apijson.Field + Type apijson.Field + CodeInterpreter apijson.Field + FileSearch apijson.Field + Function apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r toolCallDeltaJSON) RawJSON() string { + return r.raw +} + +func (r *ToolCallDelta) UnmarshalJSON(data []byte) (err error) { + *r = ToolCallDelta{} + err = apijson.UnmarshalRoot(data, &r.union) + if err != nil { + return err + } + return apijson.Port(r.union, &r) +} + +// AsUnion returns a [ToolCallDeltaUnion] interface which you can cast to the +// specific types for more type safety. 
+// +// Possible runtime types of the union are [CodeInterpreterToolCallDelta], +// [FileSearchToolCallDelta], [FunctionToolCallDelta]. +func (r ToolCallDelta) AsUnion() ToolCallDeltaUnion { + return r.union +} + +// Details of the Code Interpreter tool call the run step was involved in. +// +// Union satisfied by [CodeInterpreterToolCallDelta], [FileSearchToolCallDelta] or +// [FunctionToolCallDelta]. +type ToolCallDeltaUnion interface { + implementsToolCallDelta() +} + +func init() { + apijson.RegisterUnion( + reflect.TypeOf((*ToolCallDeltaUnion)(nil)).Elem(), + "type", + apijson.UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(CodeInterpreterToolCallDelta{}), + DiscriminatorValue: "code_interpreter", + }, + apijson.UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(FileSearchToolCallDelta{}), + DiscriminatorValue: "file_search", + }, + apijson.UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(FunctionToolCallDelta{}), + DiscriminatorValue: "function", + }, + ) +} + +// The type of tool call. This is always going to be `code_interpreter` for this +// type of tool call. +type ToolCallDeltaType string + +const ( + ToolCallDeltaTypeCodeInterpreter ToolCallDeltaType = "code_interpreter" + ToolCallDeltaTypeFileSearch ToolCallDeltaType = "file_search" + ToolCallDeltaTypeFunction ToolCallDeltaType = "function" +) + +func (r ToolCallDeltaType) IsKnown() bool { + switch r { + case ToolCallDeltaTypeCodeInterpreter, ToolCallDeltaTypeFileSearch, ToolCallDeltaTypeFunction: + return true + } + return false +} + +// Details of the tool call. +type ToolCallDeltaObject struct { + // Always `tool_calls`. + Type ToolCallDeltaObjectType `json:"type,required"` + // An array of tool calls the run step was involved in. These can be associated + // with one of three types of tools: `code_interpreter`, `file_search`, or + // `function`. 
+ ToolCalls []ToolCallDelta `json:"tool_calls"` + JSON toolCallDeltaObjectJSON `json:"-"` +} + +// toolCallDeltaObjectJSON contains the JSON metadata for the struct +// [ToolCallDeltaObject] +type toolCallDeltaObjectJSON struct { + Type apijson.Field + ToolCalls apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ToolCallDeltaObject) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r toolCallDeltaObjectJSON) RawJSON() string { + return r.raw +} + +func (r ToolCallDeltaObject) implementsRunStepDeltaStepDetails() {} + +// Always `tool_calls`. +type ToolCallDeltaObjectType string + +const ( + ToolCallDeltaObjectTypeToolCalls ToolCallDeltaObjectType = "tool_calls" +) + +func (r ToolCallDeltaObjectType) IsKnown() bool { + switch r { + case ToolCallDeltaObjectTypeToolCalls: + return true + } + return false +} + +// Details of the tool call. +type ToolCallsStepDetails struct { + // An array of tool calls the run step was involved in. These can be associated + // with one of three types of tools: `code_interpreter`, `file_search`, or + // `function`. + ToolCalls []ToolCall `json:"tool_calls,required"` + // Always `tool_calls`. + Type ToolCallsStepDetailsType `json:"type,required"` + JSON toolCallsStepDetailsJSON `json:"-"` +} + +// toolCallsStepDetailsJSON contains the JSON metadata for the struct +// [ToolCallsStepDetails] +type toolCallsStepDetailsJSON struct { + ToolCalls apijson.Field + Type apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ToolCallsStepDetails) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r toolCallsStepDetailsJSON) RawJSON() string { + return r.raw +} + +func (r ToolCallsStepDetails) implementsRunStepStepDetails() {} + +// Always `tool_calls`. 
type ToolCallsStepDetailsType string

const (
	ToolCallsStepDetailsTypeToolCalls ToolCallsStepDetailsType = "tool_calls"
)

// IsKnown reports whether r matches one of the constants declared above. It
// returns false for the zero value and for any new code the API may add later.
func (r ToolCallsStepDetailsType) IsKnown() bool {
	switch r {
	case ToolCallsStepDetailsTypeToolCalls:
		return true
	}
	return false
}

// BetaThreadRunStepListParams are the query parameters for listing run steps.
type BetaThreadRunStepListParams struct {
	// A cursor for use in pagination. `after` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// ending with obj_foo, your subsequent call can include after=obj_foo in order to
	// fetch the next page of the list.
	After param.Field[string] `query:"after"`
	// A cursor for use in pagination. `before` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// ending with obj_foo, your subsequent call can include before=obj_foo in order to
	// fetch the previous page of the list.
	Before param.Field[string] `query:"before"`
	// A limit on the number of objects to be returned. Limit can range between 1 and
	// 100, and the default is 20.
	Limit param.Field[int64] `query:"limit"`
	// Sort order by the `created_at` timestamp of the objects. `asc` for ascending
	// order and `desc` for descending order.
	Order param.Field[BetaThreadRunStepListParamsOrder] `query:"order"`
}

// URLQuery serializes [BetaThreadRunStepListParams]'s query parameters as
// `url.Values`.
//
// The settings below select comma-separated array encoding and bracketed keys
// for nested values. NOTE(review): fields left unset appear to be omitted from
// the output — confirm against apiquery's handling of unset param.Field values.
func (r BetaThreadRunStepListParams) URLQuery() (v url.Values) {
	return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{
		ArrayFormat:  apiquery.ArrayQueryFormatComma,
		NestedFormat: apiquery.NestedQueryFormatBrackets,
	})
}

// Sort order by the `created_at` timestamp of the objects. `asc` for ascending
// order and `desc` for descending order.
+type BetaThreadRunStepListParamsOrder string + +const ( + BetaThreadRunStepListParamsOrderAsc BetaThreadRunStepListParamsOrder = "asc" + BetaThreadRunStepListParamsOrderDesc BetaThreadRunStepListParamsOrder = "desc" +) + +func (r BetaThreadRunStepListParamsOrder) IsKnown() bool { + switch r { + case BetaThreadRunStepListParamsOrderAsc, BetaThreadRunStepListParamsOrderDesc: + return true + } + return false +} diff --git a/betathreadrunstep_test.go b/betathreadrunstep_test.go new file mode 100644 index 0000000..74a0601 --- /dev/null +++ b/betathreadrunstep_test.go @@ -0,0 +1,73 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" +) + +func TestBetaThreadRunStepGet(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Threads.Runs.Steps.Get( + context.TODO(), + "thread_id", + "run_id", + "step_id", + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaThreadRunStepListWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.Threads.Runs.Steps.List( + context.TODO(), + "thread_id", + "run_id", + openai.BetaThreadRunStepListParams{ + After: openai.F("after"), + 
Before: openai.F("before"), + Limit: openai.F(int64(0)), + Order: openai.F(openai.BetaThreadRunStepListParamsOrderAsc), + }, + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/betavectorstore.go b/betavectorstore.go new file mode 100644 index 0000000..be6dcad --- /dev/null +++ b/betavectorstore.go @@ -0,0 +1,569 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + + "github.com/openai/openai-go/internal/apijson" + "github.com/openai/openai-go/internal/apiquery" + "github.com/openai/openai-go/internal/pagination" + "github.com/openai/openai-go/internal/param" + "github.com/openai/openai-go/internal/requestconfig" + "github.com/openai/openai-go/option" +) + +// BetaVectorStoreService contains methods and other services that help with +// interacting with the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewBetaVectorStoreService] method instead. +type BetaVectorStoreService struct { + Options []option.RequestOption + Files *BetaVectorStoreFileService + FileBatches *BetaVectorStoreFileBatchService +} + +// NewBetaVectorStoreService generates a new service that applies the given options +// to each request. These options are applied after the parent client's options (if +// there is one), and before any request-specific options. +func NewBetaVectorStoreService(opts ...option.RequestOption) (r *BetaVectorStoreService) { + r = &BetaVectorStoreService{} + r.Options = opts + r.Files = NewBetaVectorStoreFileService(opts...) + r.FileBatches = NewBetaVectorStoreFileBatchService(opts...) + return +} + +// Create a vector store. 
+func (r *BetaVectorStoreService) New(ctx context.Context, body BetaVectorStoreNewParams, opts ...option.RequestOption) (res *VectorStore, err error) { + opts = append(r.Options[:], opts...) + path := "vector_stores" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Retrieves a vector store. +func (r *BetaVectorStoreService) Get(ctx context.Context, vectorStoreID string, opts ...option.RequestOption) (res *VectorStore, err error) { + opts = append(r.Options[:], opts...) + if vectorStoreID == "" { + err = errors.New("missing required vector_store_id parameter") + return + } + path := fmt.Sprintf("vector_stores/%s", vectorStoreID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +// Modifies a vector store. +func (r *BetaVectorStoreService) Update(ctx context.Context, vectorStoreID string, body BetaVectorStoreUpdateParams, opts ...option.RequestOption) (res *VectorStore, err error) { + opts = append(r.Options[:], opts...) + if vectorStoreID == "" { + err = errors.New("missing required vector_store_id parameter") + return + } + path := fmt.Sprintf("vector_stores/%s", vectorStoreID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Returns a list of vector stores. +func (r *BetaVectorStoreService) List(ctx context.Context, query BetaVectorStoreListParams, opts ...option.RequestOption) (res *pagination.CursorPage[VectorStore], err error) { + var raw *http.Response + opts = append(r.Options[:], opts...) + opts = append([]option.RequestOption{option.WithResponseInto(&raw)}, opts...) + path := "vector_stores" + cfg, err := requestconfig.NewRequestConfig(ctx, http.MethodGet, path, query, &res, opts...) + if err != nil { + return nil, err + } + err = cfg.Execute() + if err != nil { + return nil, err + } + res.SetPageConfig(cfg, raw) + return res, nil +} + +// Returns a list of vector stores. 
+func (r *BetaVectorStoreService) ListAutoPaging(ctx context.Context, query BetaVectorStoreListParams, opts ...option.RequestOption) *pagination.CursorPageAutoPager[VectorStore] { + return pagination.NewCursorPageAutoPager(r.List(ctx, query, opts...)) +} + +// Delete a vector store. +func (r *BetaVectorStoreService) Delete(ctx context.Context, vectorStoreID string, opts ...option.RequestOption) (res *VectorStoreDeleted, err error) { + opts = append(r.Options[:], opts...) + if vectorStoreID == "" { + err = errors.New("missing required vector_store_id parameter") + return + } + path := fmt.Sprintf("vector_stores/%s", vectorStoreID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodDelete, path, nil, &res, opts...) + return +} + +// A vector store is a collection of processed files that can be used by the +// `file_search` tool. +type VectorStore struct { + // The identifier, which can be referenced in API endpoints. + ID string `json:"id,required"` + // The Unix timestamp (in seconds) for when the vector store was created. + CreatedAt int64 `json:"created_at,required"` + FileCounts VectorStoreFileCounts `json:"file_counts,required"` + // The Unix timestamp (in seconds) for when the vector store was last active. + LastActiveAt int64 `json:"last_active_at,required,nullable"` + // Set of 16 key-value pairs that can be attached to an object. This can be useful + // for storing additional information about the object in a structured format. Keys + // can be a maximum of 64 characters long and values can be a maximum of 512 + // characters long. + Metadata interface{} `json:"metadata,required,nullable"` + // The name of the vector store. + Name string `json:"name,required"` + // The object type, which is always `vector_store`. + Object VectorStoreObject `json:"object,required"` + // The status of the vector store, which can be either `expired`, `in_progress`, or + // `completed`. A status of `completed` indicates that the vector store is ready + // for use. 
+ Status VectorStoreStatus `json:"status,required"` + // The total number of bytes used by the files in the vector store. + UsageBytes int64 `json:"usage_bytes,required"` + // The expiration policy for a vector store. + ExpiresAfter VectorStoreExpiresAfter `json:"expires_after"` + // The Unix timestamp (in seconds) for when the vector store will expire. + ExpiresAt int64 `json:"expires_at,nullable"` + JSON vectorStoreJSON `json:"-"` +} + +// vectorStoreJSON contains the JSON metadata for the struct [VectorStore] +type vectorStoreJSON struct { + ID apijson.Field + CreatedAt apijson.Field + FileCounts apijson.Field + LastActiveAt apijson.Field + Metadata apijson.Field + Name apijson.Field + Object apijson.Field + Status apijson.Field + UsageBytes apijson.Field + ExpiresAfter apijson.Field + ExpiresAt apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *VectorStore) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r vectorStoreJSON) RawJSON() string { + return r.raw +} + +type VectorStoreFileCounts struct { + // The number of files that were cancelled. + Cancelled int64 `json:"cancelled,required"` + // The number of files that have been successfully processed. + Completed int64 `json:"completed,required"` + // The number of files that have failed to process. + Failed int64 `json:"failed,required"` + // The number of files that are currently being processed. + InProgress int64 `json:"in_progress,required"` + // The total number of files. 
+ Total int64 `json:"total,required"` + JSON vectorStoreFileCountsJSON `json:"-"` +} + +// vectorStoreFileCountsJSON contains the JSON metadata for the struct +// [VectorStoreFileCounts] +type vectorStoreFileCountsJSON struct { + Cancelled apijson.Field + Completed apijson.Field + Failed apijson.Field + InProgress apijson.Field + Total apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *VectorStoreFileCounts) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r vectorStoreFileCountsJSON) RawJSON() string { + return r.raw +} + +// The object type, which is always `vector_store`. +type VectorStoreObject string + +const ( + VectorStoreObjectVectorStore VectorStoreObject = "vector_store" +) + +func (r VectorStoreObject) IsKnown() bool { + switch r { + case VectorStoreObjectVectorStore: + return true + } + return false +} + +// The status of the vector store, which can be either `expired`, `in_progress`, or +// `completed`. A status of `completed` indicates that the vector store is ready +// for use. +type VectorStoreStatus string + +const ( + VectorStoreStatusExpired VectorStoreStatus = "expired" + VectorStoreStatusInProgress VectorStoreStatus = "in_progress" + VectorStoreStatusCompleted VectorStoreStatus = "completed" +) + +func (r VectorStoreStatus) IsKnown() bool { + switch r { + case VectorStoreStatusExpired, VectorStoreStatusInProgress, VectorStoreStatusCompleted: + return true + } + return false +} + +// The expiration policy for a vector store. +type VectorStoreExpiresAfter struct { + // Anchor timestamp after which the expiration policy applies. Supported anchors: + // `last_active_at`. + Anchor VectorStoreExpiresAfterAnchor `json:"anchor,required"` + // The number of days after the anchor time that the vector store will expire. 
+ Days int64 `json:"days,required"` + JSON vectorStoreExpiresAfterJSON `json:"-"` +} + +// vectorStoreExpiresAfterJSON contains the JSON metadata for the struct +// [VectorStoreExpiresAfter] +type vectorStoreExpiresAfterJSON struct { + Anchor apijson.Field + Days apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *VectorStoreExpiresAfter) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r vectorStoreExpiresAfterJSON) RawJSON() string { + return r.raw +} + +// Anchor timestamp after which the expiration policy applies. Supported anchors: +// `last_active_at`. +type VectorStoreExpiresAfterAnchor string + +const ( + VectorStoreExpiresAfterAnchorLastActiveAt VectorStoreExpiresAfterAnchor = "last_active_at" +) + +func (r VectorStoreExpiresAfterAnchor) IsKnown() bool { + switch r { + case VectorStoreExpiresAfterAnchorLastActiveAt: + return true + } + return false +} + +type VectorStoreDeleted struct { + ID string `json:"id,required"` + Deleted bool `json:"deleted,required"` + Object VectorStoreDeletedObject `json:"object,required"` + JSON vectorStoreDeletedJSON `json:"-"` +} + +// vectorStoreDeletedJSON contains the JSON metadata for the struct +// [VectorStoreDeleted] +type vectorStoreDeletedJSON struct { + ID apijson.Field + Deleted apijson.Field + Object apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *VectorStoreDeleted) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r vectorStoreDeletedJSON) RawJSON() string { + return r.raw +} + +type VectorStoreDeletedObject string + +const ( + VectorStoreDeletedObjectVectorStoreDeleted VectorStoreDeletedObject = "vector_store.deleted" +) + +func (r VectorStoreDeletedObject) IsKnown() bool { + switch r { + case VectorStoreDeletedObjectVectorStoreDeleted: + return true + } + return false +} + +type BetaVectorStoreNewParams struct { + // The chunking strategy used to chunk the 
file(s). If not set, will use the `auto` + // strategy. Only applicable if `file_ids` is non-empty. + ChunkingStrategy param.Field[BetaVectorStoreNewParamsChunkingStrategyUnion] `json:"chunking_strategy"` + // The expiration policy for a vector store. + ExpiresAfter param.Field[BetaVectorStoreNewParamsExpiresAfter] `json:"expires_after"` + // A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + // the vector store should use. Useful for tools like `file_search` that can access + // files. + FileIDs param.Field[[]string] `json:"file_ids"` + // Set of 16 key-value pairs that can be attached to an object. This can be useful + // for storing additional information about the object in a structured format. Keys + // can be a maximum of 64 characters long and values can be a maximum of 512 + // characters long. + Metadata param.Field[interface{}] `json:"metadata"` + // The name of the vector store. + Name param.Field[string] `json:"name"` +} + +func (r BetaVectorStoreNewParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// The chunking strategy used to chunk the file(s). If not set, will use the `auto` +// strategy. Only applicable if `file_ids` is non-empty. +type BetaVectorStoreNewParamsChunkingStrategy struct { + // Always `auto`. + Type param.Field[BetaVectorStoreNewParamsChunkingStrategyType] `json:"type,required"` + Static param.Field[interface{}] `json:"static,required"` +} + +func (r BetaVectorStoreNewParamsChunkingStrategy) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r BetaVectorStoreNewParamsChunkingStrategy) implementsBetaVectorStoreNewParamsChunkingStrategyUnion() { +} + +// The chunking strategy used to chunk the file(s). If not set, will use the `auto` +// strategy. Only applicable if `file_ids` is non-empty. 
+// +// Satisfied by [BetaVectorStoreNewParamsChunkingStrategyAuto], +// [BetaVectorStoreNewParamsChunkingStrategyStatic], +// [BetaVectorStoreNewParamsChunkingStrategy]. +type BetaVectorStoreNewParamsChunkingStrategyUnion interface { + implementsBetaVectorStoreNewParamsChunkingStrategyUnion() +} + +// The default strategy. This strategy currently uses a `max_chunk_size_tokens` of +// `800` and `chunk_overlap_tokens` of `400`. +type BetaVectorStoreNewParamsChunkingStrategyAuto struct { + // Always `auto`. + Type param.Field[BetaVectorStoreNewParamsChunkingStrategyAutoType] `json:"type,required"` +} + +func (r BetaVectorStoreNewParamsChunkingStrategyAuto) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r BetaVectorStoreNewParamsChunkingStrategyAuto) implementsBetaVectorStoreNewParamsChunkingStrategyUnion() { +} + +// Always `auto`. +type BetaVectorStoreNewParamsChunkingStrategyAutoType string + +const ( + BetaVectorStoreNewParamsChunkingStrategyAutoTypeAuto BetaVectorStoreNewParamsChunkingStrategyAutoType = "auto" +) + +func (r BetaVectorStoreNewParamsChunkingStrategyAutoType) IsKnown() bool { + switch r { + case BetaVectorStoreNewParamsChunkingStrategyAutoTypeAuto: + return true + } + return false +} + +type BetaVectorStoreNewParamsChunkingStrategyStatic struct { + Static param.Field[BetaVectorStoreNewParamsChunkingStrategyStaticStatic] `json:"static,required"` + // Always `static`. + Type param.Field[BetaVectorStoreNewParamsChunkingStrategyStaticType] `json:"type,required"` +} + +func (r BetaVectorStoreNewParamsChunkingStrategyStatic) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r BetaVectorStoreNewParamsChunkingStrategyStatic) implementsBetaVectorStoreNewParamsChunkingStrategyUnion() { +} + +type BetaVectorStoreNewParamsChunkingStrategyStaticStatic struct { + // The number of tokens that overlap between chunks. The default value is `400`. 
+ // + // Note that the overlap must not exceed half of `max_chunk_size_tokens`. + ChunkOverlapTokens param.Field[int64] `json:"chunk_overlap_tokens,required"` + // The maximum number of tokens in each chunk. The default value is `800`. The + // minimum value is `100` and the maximum value is `4096`. + MaxChunkSizeTokens param.Field[int64] `json:"max_chunk_size_tokens,required"` +} + +func (r BetaVectorStoreNewParamsChunkingStrategyStaticStatic) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// Always `static`. +type BetaVectorStoreNewParamsChunkingStrategyStaticType string + +const ( + BetaVectorStoreNewParamsChunkingStrategyStaticTypeStatic BetaVectorStoreNewParamsChunkingStrategyStaticType = "static" +) + +func (r BetaVectorStoreNewParamsChunkingStrategyStaticType) IsKnown() bool { + switch r { + case BetaVectorStoreNewParamsChunkingStrategyStaticTypeStatic: + return true + } + return false +} + +// Always `auto`. +type BetaVectorStoreNewParamsChunkingStrategyType string + +const ( + BetaVectorStoreNewParamsChunkingStrategyTypeAuto BetaVectorStoreNewParamsChunkingStrategyType = "auto" + BetaVectorStoreNewParamsChunkingStrategyTypeStatic BetaVectorStoreNewParamsChunkingStrategyType = "static" +) + +func (r BetaVectorStoreNewParamsChunkingStrategyType) IsKnown() bool { + switch r { + case BetaVectorStoreNewParamsChunkingStrategyTypeAuto, BetaVectorStoreNewParamsChunkingStrategyTypeStatic: + return true + } + return false +} + +// The expiration policy for a vector store. +type BetaVectorStoreNewParamsExpiresAfter struct { + // Anchor timestamp after which the expiration policy applies. Supported anchors: + // `last_active_at`. + Anchor param.Field[BetaVectorStoreNewParamsExpiresAfterAnchor] `json:"anchor,required"` + // The number of days after the anchor time that the vector store will expire. 
+ Days param.Field[int64] `json:"days,required"` +} + +func (r BetaVectorStoreNewParamsExpiresAfter) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// Anchor timestamp after which the expiration policy applies. Supported anchors: +// `last_active_at`. +type BetaVectorStoreNewParamsExpiresAfterAnchor string + +const ( + BetaVectorStoreNewParamsExpiresAfterAnchorLastActiveAt BetaVectorStoreNewParamsExpiresAfterAnchor = "last_active_at" +) + +func (r BetaVectorStoreNewParamsExpiresAfterAnchor) IsKnown() bool { + switch r { + case BetaVectorStoreNewParamsExpiresAfterAnchorLastActiveAt: + return true + } + return false +} + +type BetaVectorStoreUpdateParams struct { + // The expiration policy for a vector store. + ExpiresAfter param.Field[BetaVectorStoreUpdateParamsExpiresAfter] `json:"expires_after"` + // Set of 16 key-value pairs that can be attached to an object. This can be useful + // for storing additional information about the object in a structured format. Keys + // can be a maximum of 64 characters long and values can be a maximum of 512 + // characters long. + Metadata param.Field[interface{}] `json:"metadata"` + // The name of the vector store. + Name param.Field[string] `json:"name"` +} + +func (r BetaVectorStoreUpdateParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// The expiration policy for a vector store. +type BetaVectorStoreUpdateParamsExpiresAfter struct { + // Anchor timestamp after which the expiration policy applies. Supported anchors: + // `last_active_at`. + Anchor param.Field[BetaVectorStoreUpdateParamsExpiresAfterAnchor] `json:"anchor,required"` + // The number of days after the anchor time that the vector store will expire. + Days param.Field[int64] `json:"days,required"` +} + +func (r BetaVectorStoreUpdateParamsExpiresAfter) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// Anchor timestamp after which the expiration policy applies. 
Supported anchors: +// `last_active_at`. +type BetaVectorStoreUpdateParamsExpiresAfterAnchor string + +const ( + BetaVectorStoreUpdateParamsExpiresAfterAnchorLastActiveAt BetaVectorStoreUpdateParamsExpiresAfterAnchor = "last_active_at" +) + +func (r BetaVectorStoreUpdateParamsExpiresAfterAnchor) IsKnown() bool { + switch r { + case BetaVectorStoreUpdateParamsExpiresAfterAnchorLastActiveAt: + return true + } + return false +} + +type BetaVectorStoreListParams struct { + // A cursor for use in pagination. `after` is an object ID that defines your place + // in the list. For instance, if you make a list request and receive 100 objects, + // ending with obj_foo, your subsequent call can include after=obj_foo in order to + // fetch the next page of the list. + After param.Field[string] `query:"after"` + // A cursor for use in pagination. `before` is an object ID that defines your place + // in the list. For instance, if you make a list request and receive 100 objects, + // ending with obj_foo, your subsequent call can include before=obj_foo in order to + // fetch the previous page of the list. + Before param.Field[string] `query:"before"` + // A limit on the number of objects to be returned. Limit can range between 1 and + // 100, and the default is 20. + Limit param.Field[int64] `query:"limit"` + // Sort order by the `created_at` timestamp of the objects. `asc` for ascending + // order and `desc` for descending order. + Order param.Field[BetaVectorStoreListParamsOrder] `query:"order"` +} + +// URLQuery serializes [BetaVectorStoreListParams]'s query parameters as +// `url.Values`. +func (r BetaVectorStoreListParams) URLQuery() (v url.Values) { + return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ + ArrayFormat: apiquery.ArrayQueryFormatComma, + NestedFormat: apiquery.NestedQueryFormatBrackets, + }) +} + +// Sort order by the `created_at` timestamp of the objects. `asc` for ascending +// order and `desc` for descending order. 
+type BetaVectorStoreListParamsOrder string + +const ( + BetaVectorStoreListParamsOrderAsc BetaVectorStoreListParamsOrder = "asc" + BetaVectorStoreListParamsOrderDesc BetaVectorStoreListParamsOrder = "desc" +) + +func (r BetaVectorStoreListParamsOrder) IsKnown() bool { + switch r { + case BetaVectorStoreListParamsOrderAsc, BetaVectorStoreListParamsOrderDesc: + return true + } + return false +} diff --git a/betavectorstore_test.go b/betavectorstore_test.go new file mode 100644 index 0000000..2c5b2c0 --- /dev/null +++ b/betavectorstore_test.go @@ -0,0 +1,151 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" +) + +func TestBetaVectorStoreNewWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.VectorStores.New(context.TODO(), openai.BetaVectorStoreNewParams{ + ChunkingStrategy: openai.F[openai.BetaVectorStoreNewParamsChunkingStrategyUnion](openai.BetaVectorStoreNewParamsChunkingStrategyAuto{ + Type: openai.F(openai.BetaVectorStoreNewParamsChunkingStrategyAutoTypeAuto), + }), + ExpiresAfter: openai.F(openai.BetaVectorStoreNewParamsExpiresAfter{ + Anchor: openai.F(openai.BetaVectorStoreNewParamsExpiresAfterAnchorLastActiveAt), + Days: openai.F(int64(1)), + }), + FileIDs: openai.F([]string{"string", "string", "string"}), + Metadata: openai.F[any](map[string]interface{}{}), + Name: openai.F("name"), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", 
err.Error()) + } +} + +func TestBetaVectorStoreGet(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.VectorStores.Get(context.TODO(), "vector_store_id") + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaVectorStoreUpdateWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.VectorStores.Update( + context.TODO(), + "vector_store_id", + openai.BetaVectorStoreUpdateParams{ + ExpiresAfter: openai.F(openai.BetaVectorStoreUpdateParamsExpiresAfter{ + Anchor: openai.F(openai.BetaVectorStoreUpdateParamsExpiresAfterAnchorLastActiveAt), + Days: openai.F(int64(1)), + }), + Metadata: openai.F[any](map[string]interface{}{}), + Name: openai.F("name"), + }, + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaVectorStoreListWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.VectorStores.List(context.TODO(), openai.BetaVectorStoreListParams{ + After: openai.F("after"), + Before: 
openai.F("before"), + Limit: openai.F(int64(0)), + Order: openai.F(openai.BetaVectorStoreListParamsOrderAsc), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaVectorStoreDelete(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.VectorStores.Delete(context.TODO(), "vector_store_id") + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/betavectorstorefile.go b/betavectorstorefile.go new file mode 100644 index 0000000..b04e15b --- /dev/null +++ b/betavectorstorefile.go @@ -0,0 +1,670 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "reflect" + + "github.com/openai/openai-go/internal/apijson" + "github.com/openai/openai-go/internal/apiquery" + "github.com/openai/openai-go/internal/pagination" + "github.com/openai/openai-go/internal/param" + "github.com/openai/openai-go/internal/requestconfig" + "github.com/openai/openai-go/option" + "github.com/tidwall/gjson" +) + +// BetaVectorStoreFileService contains methods and other services that help with +// interacting with the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewBetaVectorStoreFileService] method instead. 
+type BetaVectorStoreFileService struct { + Options []option.RequestOption +} + +// NewBetaVectorStoreFileService generates a new service that applies the given +// options to each request. These options are applied after the parent client's +// options (if there is one), and before any request-specific options. +func NewBetaVectorStoreFileService(opts ...option.RequestOption) (r *BetaVectorStoreFileService) { + r = &BetaVectorStoreFileService{} + r.Options = opts + return +} + +// Create a vector store file by attaching a +// [File](https://platform.openai.com/docs/api-reference/files) to a +// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object). +func (r *BetaVectorStoreFileService) New(ctx context.Context, vectorStoreID string, body BetaVectorStoreFileNewParams, opts ...option.RequestOption) (res *VectorStoreFile, err error) { + opts = append(r.Options[:], opts...) + if vectorStoreID == "" { + err = errors.New("missing required vector_store_id parameter") + return + } + path := fmt.Sprintf("vector_stores/%s/files", vectorStoreID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Retrieves a vector store file. +func (r *BetaVectorStoreFileService) Get(ctx context.Context, vectorStoreID string, fileID string, opts ...option.RequestOption) (res *VectorStoreFile, err error) { + opts = append(r.Options[:], opts...) + if vectorStoreID == "" { + err = errors.New("missing required vector_store_id parameter") + return + } + if fileID == "" { + err = errors.New("missing required file_id parameter") + return + } + path := fmt.Sprintf("vector_stores/%s/files/%s", vectorStoreID, fileID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +// Returns a list of vector store files. 
+func (r *BetaVectorStoreFileService) List(ctx context.Context, vectorStoreID string, query BetaVectorStoreFileListParams, opts ...option.RequestOption) (res *pagination.CursorPage[VectorStoreFile], err error) { + var raw *http.Response + opts = append(r.Options[:], opts...) + opts = append([]option.RequestOption{option.WithResponseInto(&raw)}, opts...) + if vectorStoreID == "" { + err = errors.New("missing required vector_store_id parameter") + return + } + path := fmt.Sprintf("vector_stores/%s/files", vectorStoreID) + cfg, err := requestconfig.NewRequestConfig(ctx, http.MethodGet, path, query, &res, opts...) + if err != nil { + return nil, err + } + err = cfg.Execute() + if err != nil { + return nil, err + } + res.SetPageConfig(cfg, raw) + return res, nil +} + +// Returns a list of vector store files. +func (r *BetaVectorStoreFileService) ListAutoPaging(ctx context.Context, vectorStoreID string, query BetaVectorStoreFileListParams, opts ...option.RequestOption) *pagination.CursorPageAutoPager[VectorStoreFile] { + return pagination.NewCursorPageAutoPager(r.List(ctx, vectorStoreID, query, opts...)) +} + +// Delete a vector store file. This will remove the file from the vector store but +// the file itself will not be deleted. To delete the file, use the +// [delete file](https://platform.openai.com/docs/api-reference/files/delete) +// endpoint. +func (r *BetaVectorStoreFileService) Delete(ctx context.Context, vectorStoreID string, fileID string, opts ...option.RequestOption) (res *VectorStoreFileDeleted, err error) { + opts = append(r.Options[:], opts...) + if vectorStoreID == "" { + err = errors.New("missing required vector_store_id parameter") + return + } + if fileID == "" { + err = errors.New("missing required file_id parameter") + return + } + path := fmt.Sprintf("vector_stores/%s/files/%s", vectorStoreID, fileID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodDelete, path, nil, &res, opts...) 
+ return +} + +// A list of files attached to a vector store. +type VectorStoreFile struct { + // The identifier, which can be referenced in API endpoints. + ID string `json:"id,required"` + // The Unix timestamp (in seconds) for when the vector store file was created. + CreatedAt int64 `json:"created_at,required"` + // The last error associated with this vector store file. Will be `null` if there + // are no errors. + LastError VectorStoreFileLastError `json:"last_error,required,nullable"` + // The object type, which is always `vector_store.file`. + Object VectorStoreFileObject `json:"object,required"` + // The status of the vector store file, which can be either `in_progress`, + // `completed`, `cancelled`, or `failed`. The status `completed` indicates that the + // vector store file is ready for use. + Status VectorStoreFileStatus `json:"status,required"` + // The total vector store usage in bytes. Note that this may be different from the + // original file size. + UsageBytes int64 `json:"usage_bytes,required"` + // The ID of the + // [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + // that the [File](https://platform.openai.com/docs/api-reference/files) is + // attached to. + VectorStoreID string `json:"vector_store_id,required"` + // The strategy used to chunk the file. 
+ ChunkingStrategy VectorStoreFileChunkingStrategy `json:"chunking_strategy"` + JSON vectorStoreFileJSON `json:"-"` +} + +// vectorStoreFileJSON contains the JSON metadata for the struct [VectorStoreFile] +type vectorStoreFileJSON struct { + ID apijson.Field + CreatedAt apijson.Field + LastError apijson.Field + Object apijson.Field + Status apijson.Field + UsageBytes apijson.Field + VectorStoreID apijson.Field + ChunkingStrategy apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *VectorStoreFile) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r vectorStoreFileJSON) RawJSON() string { + return r.raw +} + +// The last error associated with this vector store file. Will be `null` if there +// are no errors. +type VectorStoreFileLastError struct { + // One of `server_error` or `rate_limit_exceeded`. + Code VectorStoreFileLastErrorCode `json:"code,required"` + // A human-readable description of the error. + Message string `json:"message,required"` + JSON vectorStoreFileLastErrorJSON `json:"-"` +} + +// vectorStoreFileLastErrorJSON contains the JSON metadata for the struct +// [VectorStoreFileLastError] +type vectorStoreFileLastErrorJSON struct { + Code apijson.Field + Message apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *VectorStoreFileLastError) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r vectorStoreFileLastErrorJSON) RawJSON() string { + return r.raw +} + +// One of `server_error` or `rate_limit_exceeded`. 
type VectorStoreFileLastErrorCode string

// NOTE(review): the spec comment above lists `server_error` and
// `rate_limit_exceeded`, but the constants generated below are a different
// set (internal_error, file_not_found, parsing_error, unhandled_mime_type) —
// the upstream comment looks stale; confirm against the OpenAPI source.
const (
	VectorStoreFileLastErrorCodeInternalError     VectorStoreFileLastErrorCode = "internal_error"
	VectorStoreFileLastErrorCodeFileNotFound      VectorStoreFileLastErrorCode = "file_not_found"
	VectorStoreFileLastErrorCodeParsingError     VectorStoreFileLastErrorCode = "parsing_error"
	VectorStoreFileLastErrorCodeUnhandledMimeType VectorStoreFileLastErrorCode = "unhandled_mime_type"
)

// IsKnown reports whether r is an error code known to this version of the
// SDK. Because the type is a plain string, newer API values still decode;
// they simply return false here.
func (r VectorStoreFileLastErrorCode) IsKnown() bool {
	switch r {
	case VectorStoreFileLastErrorCodeInternalError, VectorStoreFileLastErrorCodeFileNotFound, VectorStoreFileLastErrorCodeParsingError, VectorStoreFileLastErrorCodeUnhandledMimeType:
		return true
	}
	return false
}

// The object type, which is always `vector_store.file`.
type VectorStoreFileObject string

const (
	VectorStoreFileObjectVectorStoreFile VectorStoreFileObject = "vector_store.file"
)

// IsKnown reports whether r is a value known to this version of the SDK.
func (r VectorStoreFileObject) IsKnown() bool {
	switch r {
	case VectorStoreFileObjectVectorStoreFile:
		return true
	}
	return false
}

// The status of the vector store file, which can be either `in_progress`,
// `completed`, `cancelled`, or `failed`. The status `completed` indicates that the
// vector store file is ready for use.
type VectorStoreFileStatus string

const (
	VectorStoreFileStatusInProgress VectorStoreFileStatus = "in_progress"
	VectorStoreFileStatusCompleted  VectorStoreFileStatus = "completed"
	VectorStoreFileStatusCancelled  VectorStoreFileStatus = "cancelled"
	VectorStoreFileStatusFailed     VectorStoreFileStatus = "failed"
)

// IsKnown reports whether r is a value known to this version of the SDK.
func (r VectorStoreFileStatus) IsKnown() bool {
	switch r {
	case VectorStoreFileStatusInProgress, VectorStoreFileStatusCompleted, VectorStoreFileStatusCancelled, VectorStoreFileStatusFailed:
		return true
	}
	return false
}

// The strategy used to chunk the file.
//
// This is the flattened view of a discriminated union; inspect Type (or call
// AsUnion) to recover the concrete variant that was decoded.
type VectorStoreFileChunkingStrategy struct {
	// Discriminator for the union: `static` or `other` (the generated comment
	// previously said "Always `static`", which does not match the registered
	// `other` variant).
	Type VectorStoreFileChunkingStrategyType `json:"type,required"`
	// This field can have the runtime type of
	// [VectorStoreFileChunkingStrategyStaticStatic].
	Static interface{}                         `json:"static,required"`
	JSON   vectorStoreFileChunkingStrategyJSON `json:"-"`
	union  VectorStoreFileChunkingStrategyUnion
}

// vectorStoreFileChunkingStrategyJSON contains the JSON metadata for the struct
// [VectorStoreFileChunkingStrategy]
type vectorStoreFileChunkingStrategyJSON struct {
	Type        apijson.Field
	Static      apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

// RawJSON returns the original JSON this value was decoded from.
func (r vectorStoreFileChunkingStrategyJSON) RawJSON() string {
	return r.raw
}

// UnmarshalJSON decodes the variant selected by the "type" discriminator into
// the union field, then mirrors its fields onto the flattened struct.
func (r *VectorStoreFileChunkingStrategy) UnmarshalJSON(data []byte) (err error) {
	// Reset first so fields from a previous decode cannot leak through.
	*r = VectorStoreFileChunkingStrategy{}
	err = apijson.UnmarshalRoot(data, &r.union)
	if err != nil {
		return err
	}
	// apijson.Port copies the decoded variant's fields into r.
	return apijson.Port(r.union, &r)
}

// AsUnion returns a [VectorStoreFileChunkingStrategyUnion] interface which you can
// cast to the specific types for more type safety.
//
// Possible runtime types of the union are [VectorStoreFileChunkingStrategyStatic],
// [VectorStoreFileChunkingStrategyOther].
func (r VectorStoreFileChunkingStrategy) AsUnion() VectorStoreFileChunkingStrategyUnion {
	return r.union
}

// The strategy used to chunk the file.
//
// Union satisfied by [VectorStoreFileChunkingStrategyStatic] or
// [VectorStoreFileChunkingStrategyOther].
type VectorStoreFileChunkingStrategyUnion interface {
	// Marker method; implemented by the variants registered in init() below.
	implementsVectorStoreFileChunkingStrategy()
}

// init registers the union's variants with apijson so decoding can select the
// concrete Go type from the value of the "type" discriminator key.
func init() {
	apijson.RegisterUnion(
		reflect.TypeOf((*VectorStoreFileChunkingStrategyUnion)(nil)).Elem(),
		"type",
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(VectorStoreFileChunkingStrategyStatic{}),
			DiscriminatorValue: "static",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(VectorStoreFileChunkingStrategyOther{}),
			DiscriminatorValue: "other",
		},
	)
}

// VectorStoreFileChunkingStrategyStatic is the `static` chunking strategy
// variant; chunk sizes are controlled by the nested Static settings.
type VectorStoreFileChunkingStrategyStatic struct {
	Static VectorStoreFileChunkingStrategyStaticStatic `json:"static,required"`
	// Always `static`.
	Type VectorStoreFileChunkingStrategyStaticType `json:"type,required"`
	JSON vectorStoreFileChunkingStrategyStaticJSON `json:"-"`
}

// vectorStoreFileChunkingStrategyStaticJSON contains the JSON metadata for the
// struct [VectorStoreFileChunkingStrategyStatic]
type vectorStoreFileChunkingStrategyStaticJSON struct {
	Static      apijson.Field
	Type        apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

// UnmarshalJSON decodes into r using the SDK's JSON machinery.
func (r *VectorStoreFileChunkingStrategyStatic) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the original JSON this value was decoded from.
func (r vectorStoreFileChunkingStrategyStaticJSON) RawJSON() string {
	return r.raw
}

func (r VectorStoreFileChunkingStrategyStatic) implementsVectorStoreFileChunkingStrategy() {}

// VectorStoreFileChunkingStrategyStaticStatic holds the numeric settings of a
// static chunking strategy.
type VectorStoreFileChunkingStrategyStaticStatic struct {
	// The number of tokens that overlap between chunks. The default value is `400`.
	//
	// Note that the overlap must not exceed half of `max_chunk_size_tokens`.
	ChunkOverlapTokens int64 `json:"chunk_overlap_tokens,required"`
	// The maximum number of tokens in each chunk. The default value is `800`. The
	// minimum value is `100` and the maximum value is `4096`.
	MaxChunkSizeTokens int64                                           `json:"max_chunk_size_tokens,required"`
	JSON               vectorStoreFileChunkingStrategyStaticStaticJSON `json:"-"`
}

// vectorStoreFileChunkingStrategyStaticStaticJSON contains the JSON metadata for
// the struct [VectorStoreFileChunkingStrategyStaticStatic]
type vectorStoreFileChunkingStrategyStaticStaticJSON struct {
	ChunkOverlapTokens apijson.Field
	MaxChunkSizeTokens apijson.Field
	raw                string
	ExtraFields        map[string]apijson.Field
}

// UnmarshalJSON decodes into r using the SDK's JSON machinery.
func (r *VectorStoreFileChunkingStrategyStaticStatic) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the original JSON this value was decoded from.
func (r vectorStoreFileChunkingStrategyStaticStaticJSON) RawJSON() string {
	return r.raw
}

// Always `static`.
type VectorStoreFileChunkingStrategyStaticType string

const (
	VectorStoreFileChunkingStrategyStaticTypeStatic VectorStoreFileChunkingStrategyStaticType = "static"
)

// IsKnown reports whether r is a value known to this version of the SDK.
func (r VectorStoreFileChunkingStrategyStaticType) IsKnown() bool {
	switch r {
	case VectorStoreFileChunkingStrategyStaticTypeStatic:
		return true
	}
	return false
}

// This is returned when the chunking strategy is unknown. Typically, this is
// because the file was indexed before the `chunking_strategy` concept was
// introduced in the API.
type VectorStoreFileChunkingStrategyOther struct {
	// Always `other`.
	Type VectorStoreFileChunkingStrategyOtherType `json:"type,required"`
	JSON vectorStoreFileChunkingStrategyOtherJSON `json:"-"`
}

// vectorStoreFileChunkingStrategyOtherJSON contains the JSON metadata for the
// struct [VectorStoreFileChunkingStrategyOther]
type vectorStoreFileChunkingStrategyOtherJSON struct {
	Type        apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

// UnmarshalJSON decodes into r using the SDK's JSON machinery.
func (r *VectorStoreFileChunkingStrategyOther) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the original JSON this value was decoded from.
func (r vectorStoreFileChunkingStrategyOtherJSON) RawJSON() string {
	return r.raw
}

func (r VectorStoreFileChunkingStrategyOther) implementsVectorStoreFileChunkingStrategy() {}

// Always `other`.
type VectorStoreFileChunkingStrategyOtherType string

const (
	VectorStoreFileChunkingStrategyOtherTypeOther VectorStoreFileChunkingStrategyOtherType = "other"
)

// IsKnown reports whether r is a value known to this version of the SDK.
func (r VectorStoreFileChunkingStrategyOtherType) IsKnown() bool {
	switch r {
	case VectorStoreFileChunkingStrategyOtherTypeOther:
		return true
	}
	return false
}

// Discriminator for [VectorStoreFileChunkingStrategy]: one of `static` or
// `other`.
type VectorStoreFileChunkingStrategyType string

const (
	// The file was chunked with an explicitly configured static strategy.
	VectorStoreFileChunkingStrategyTypeStatic VectorStoreFileChunkingStrategyType = "static"
	// The strategy is unknown; typically the file was indexed before the
	// `chunking_strategy` concept was introduced in the API.
	VectorStoreFileChunkingStrategyTypeOther VectorStoreFileChunkingStrategyType = "other"
)

// IsKnown reports whether r is a value known to this version of the SDK.
func (r VectorStoreFileChunkingStrategyType) IsKnown() bool {
	switch r {
	case VectorStoreFileChunkingStrategyTypeStatic, VectorStoreFileChunkingStrategyTypeOther:
		return true
	}
	return false
}

// VectorStoreFileDeleted is the acknowledgement returned when a vector store
// file is deleted.
type VectorStoreFileDeleted struct {
	// The identifier of the deleted vector store file.
	ID string `json:"id,required"`
	// Whether the deletion succeeded.
	Deleted bool `json:"deleted,required"`
	// Always `vector_store.file.deleted`.
	Object VectorStoreFileDeletedObject `json:"object,required"`
	JSON   vectorStoreFileDeletedJSON   `json:"-"`
}

// vectorStoreFileDeletedJSON contains the JSON metadata for the struct
// [VectorStoreFileDeleted]
type vectorStoreFileDeletedJSON struct {
	ID          apijson.Field
	Deleted     apijson.Field
	Object      apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

// UnmarshalJSON decodes into r using the SDK's JSON machinery.
func (r *VectorStoreFileDeleted) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the original JSON this value was decoded from.
func (r vectorStoreFileDeletedJSON) RawJSON() string {
	return r.raw
}

// The object type of a deletion acknowledgement.
type VectorStoreFileDeletedObject string

const (
	VectorStoreFileDeletedObjectVectorStoreFileDeleted VectorStoreFileDeletedObject = "vector_store.file.deleted"
)

// IsKnown reports whether r is a value known to this version of the SDK.
func (r VectorStoreFileDeletedObject) IsKnown() bool {
	switch r {
	case VectorStoreFileDeletedObjectVectorStoreFileDeleted:
		return true
	}
	return false
}

// BetaVectorStoreFileNewParams is the request body for attaching a file to a
// vector store.
type BetaVectorStoreFileNewParams struct {
	// A [File](https://platform.openai.com/docs/api-reference/files) ID that the
	// vector store should use. Useful for tools like `file_search` that can access
	// files.
	FileID param.Field[string] `json:"file_id,required"`
	// The chunking strategy used to chunk the file(s). If not set, will use the `auto`
	// strategy.
	ChunkingStrategy param.Field[BetaVectorStoreFileNewParamsChunkingStrategyUnion] `json:"chunking_strategy"`
}

// MarshalJSON serializes the params via apijson.MarshalRoot.
func (r BetaVectorStoreFileNewParams) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

// The chunking strategy used to chunk the file(s). If not set, will use the `auto`
// strategy.
//
// This is the permissive, union-shaped form carrying every field any variant
// can have; prefer the concrete auto/static request param types below.
type BetaVectorStoreFileNewParamsChunkingStrategy struct {
	// Discriminator: `auto` or `static`.
	Type param.Field[BetaVectorStoreFileNewParamsChunkingStrategyType] `json:"type,required"`
	// Only meaningful when Type is `static`; see the static request param type.
	Static param.Field[interface{}] `json:"static,required"`
}

// MarshalJSON serializes the params via apijson.MarshalRoot.
func (r BetaVectorStoreFileNewParamsChunkingStrategy) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

func (r BetaVectorStoreFileNewParamsChunkingStrategy) implementsBetaVectorStoreFileNewParamsChunkingStrategyUnion() {
}

// The chunking strategy used to chunk the file(s). If not set, will use the `auto`
// strategy.
//
// Satisfied by
// [BetaVectorStoreFileNewParamsChunkingStrategyAutoChunkingStrategyRequestParam],
// [BetaVectorStoreFileNewParamsChunkingStrategyStaticChunkingStrategyRequestParam],
// [BetaVectorStoreFileNewParamsChunkingStrategy].
type BetaVectorStoreFileNewParamsChunkingStrategyUnion interface {
	implementsBetaVectorStoreFileNewParamsChunkingStrategyUnion()
}

// The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
// `800` and `chunk_overlap_tokens` of `400`.
type BetaVectorStoreFileNewParamsChunkingStrategyAutoChunkingStrategyRequestParam struct {
	// Always `auto`.
	Type param.Field[BetaVectorStoreFileNewParamsChunkingStrategyAutoChunkingStrategyRequestParamType] `json:"type,required"`
}

// MarshalJSON serializes the params via apijson.MarshalRoot.
func (r BetaVectorStoreFileNewParamsChunkingStrategyAutoChunkingStrategyRequestParam) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

func (r BetaVectorStoreFileNewParamsChunkingStrategyAutoChunkingStrategyRequestParam) implementsBetaVectorStoreFileNewParamsChunkingStrategyUnion() {
}

// Always `auto`.
type BetaVectorStoreFileNewParamsChunkingStrategyAutoChunkingStrategyRequestParamType string

const (
	BetaVectorStoreFileNewParamsChunkingStrategyAutoChunkingStrategyRequestParamTypeAuto BetaVectorStoreFileNewParamsChunkingStrategyAutoChunkingStrategyRequestParamType = "auto"
)

// IsKnown reports whether r is a value known to this version of the SDK.
func (r BetaVectorStoreFileNewParamsChunkingStrategyAutoChunkingStrategyRequestParamType) IsKnown() bool {
	switch r {
	case BetaVectorStoreFileNewParamsChunkingStrategyAutoChunkingStrategyRequestParamTypeAuto:
		return true
	}
	return false
}

// BetaVectorStoreFileNewParamsChunkingStrategyStaticChunkingStrategyRequestParam
// requests an explicitly configured static chunking strategy.
type BetaVectorStoreFileNewParamsChunkingStrategyStaticChunkingStrategyRequestParam struct {
	Static param.Field[BetaVectorStoreFileNewParamsChunkingStrategyStaticChunkingStrategyRequestParamStatic] `json:"static,required"`
	// Always `static`.
	Type param.Field[BetaVectorStoreFileNewParamsChunkingStrategyStaticChunkingStrategyRequestParamType] `json:"type,required"`
}

// MarshalJSON serializes the params via apijson.MarshalRoot.
func (r BetaVectorStoreFileNewParamsChunkingStrategyStaticChunkingStrategyRequestParam) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

func (r BetaVectorStoreFileNewParamsChunkingStrategyStaticChunkingStrategyRequestParam) implementsBetaVectorStoreFileNewParamsChunkingStrategyUnion() {
}

// BetaVectorStoreFileNewParamsChunkingStrategyStaticChunkingStrategyRequestParamStatic
// holds the numeric settings of a static chunking strategy request.
type BetaVectorStoreFileNewParamsChunkingStrategyStaticChunkingStrategyRequestParamStatic struct {
	// The number of tokens that overlap between chunks. The default value is `400`.
	//
	// Note that the overlap must not exceed half of `max_chunk_size_tokens`.
	ChunkOverlapTokens param.Field[int64] `json:"chunk_overlap_tokens,required"`
	// The maximum number of tokens in each chunk. The default value is `800`. The
	// minimum value is `100` and the maximum value is `4096`.
	MaxChunkSizeTokens param.Field[int64] `json:"max_chunk_size_tokens,required"`
}

// MarshalJSON serializes the params via apijson.MarshalRoot.
func (r BetaVectorStoreFileNewParamsChunkingStrategyStaticChunkingStrategyRequestParamStatic) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

// Always `static`.
type BetaVectorStoreFileNewParamsChunkingStrategyStaticChunkingStrategyRequestParamType string

const (
	BetaVectorStoreFileNewParamsChunkingStrategyStaticChunkingStrategyRequestParamTypeStatic BetaVectorStoreFileNewParamsChunkingStrategyStaticChunkingStrategyRequestParamType = "static"
)

// IsKnown reports whether r is a value known to this version of the SDK.
func (r BetaVectorStoreFileNewParamsChunkingStrategyStaticChunkingStrategyRequestParamType) IsKnown() bool {
	switch r {
	case BetaVectorStoreFileNewParamsChunkingStrategyStaticChunkingStrategyRequestParamTypeStatic:
		return true
	}
	return false
}

// Discriminator for [BetaVectorStoreFileNewParamsChunkingStrategy]: `auto` or
// `static` (the generated comment previously said "Always `auto`", which does
// not match the two constants declared below).
type BetaVectorStoreFileNewParamsChunkingStrategyType string

const (
	BetaVectorStoreFileNewParamsChunkingStrategyTypeAuto   BetaVectorStoreFileNewParamsChunkingStrategyType = "auto"
	BetaVectorStoreFileNewParamsChunkingStrategyTypeStatic BetaVectorStoreFileNewParamsChunkingStrategyType = "static"
)

// IsKnown reports whether r is a value known to this version of the SDK.
func (r BetaVectorStoreFileNewParamsChunkingStrategyType) IsKnown() bool {
	switch r {
	case BetaVectorStoreFileNewParamsChunkingStrategyTypeAuto, BetaVectorStoreFileNewParamsChunkingStrategyTypeStatic:
		return true
	}
	return false
}

// BetaVectorStoreFileListParams are the query parameters for listing vector
// store files.
type BetaVectorStoreFileListParams struct {
	// A cursor for use in pagination. `after` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// ending with obj_foo, your subsequent call can include after=obj_foo in order to
	// fetch the next page of the list.
	After param.Field[string] `query:"after"`
	// A cursor for use in pagination. `before` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// ending with obj_foo, your subsequent call can include before=obj_foo in order to
	// fetch the previous page of the list.
	Before param.Field[string] `query:"before"`
	// Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
	Filter param.Field[BetaVectorStoreFileListParamsFilter] `query:"filter"`
	// A limit on the number of objects to be returned. Limit can range between 1 and
	// 100, and the default is 20.
	Limit param.Field[int64] `query:"limit"`
	// Sort order by the `created_at` timestamp of the objects. `asc` for ascending
	// order and `desc` for descending order.
	Order param.Field[BetaVectorStoreFileListParamsOrder] `query:"order"`
}

// URLQuery serializes [BetaVectorStoreFileListParams]'s query parameters as
// `url.Values`.
func (r BetaVectorStoreFileListParams) URLQuery() (v url.Values) {
	return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{
		ArrayFormat:  apiquery.ArrayQueryFormatComma,
		NestedFormat: apiquery.NestedQueryFormatBrackets,
	})
}

// Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
+type BetaVectorStoreFileListParamsFilter string + +const ( + BetaVectorStoreFileListParamsFilterInProgress BetaVectorStoreFileListParamsFilter = "in_progress" + BetaVectorStoreFileListParamsFilterCompleted BetaVectorStoreFileListParamsFilter = "completed" + BetaVectorStoreFileListParamsFilterFailed BetaVectorStoreFileListParamsFilter = "failed" + BetaVectorStoreFileListParamsFilterCancelled BetaVectorStoreFileListParamsFilter = "cancelled" +) + +func (r BetaVectorStoreFileListParamsFilter) IsKnown() bool { + switch r { + case BetaVectorStoreFileListParamsFilterInProgress, BetaVectorStoreFileListParamsFilterCompleted, BetaVectorStoreFileListParamsFilterFailed, BetaVectorStoreFileListParamsFilterCancelled: + return true + } + return false +} + +// Sort order by the `created_at` timestamp of the objects. `asc` for ascending +// order and `desc` for descending order. +type BetaVectorStoreFileListParamsOrder string + +const ( + BetaVectorStoreFileListParamsOrderAsc BetaVectorStoreFileListParamsOrder = "asc" + BetaVectorStoreFileListParamsOrderDesc BetaVectorStoreFileListParamsOrder = "desc" +) + +func (r BetaVectorStoreFileListParamsOrder) IsKnown() bool { + switch r { + case BetaVectorStoreFileListParamsOrderAsc, BetaVectorStoreFileListParamsOrderDesc: + return true + } + return false +} diff --git a/betavectorstorefile_test.go b/betavectorstorefile_test.go new file mode 100644 index 0000000..6f1a1f0 --- /dev/null +++ b/betavectorstorefile_test.go @@ -0,0 +1,129 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package openai_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" +) + +func TestBetaVectorStoreFileNewWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.VectorStores.Files.New( + context.TODO(), + "vs_abc123", + openai.BetaVectorStoreFileNewParams{ + FileID: openai.F("file_id"), + ChunkingStrategy: openai.F[openai.BetaVectorStoreFileNewParamsChunkingStrategyUnion](openai.BetaVectorStoreFileNewParamsChunkingStrategyAutoChunkingStrategyRequestParam{ + Type: openai.F(openai.BetaVectorStoreFileNewParamsChunkingStrategyAutoChunkingStrategyRequestParamTypeAuto), + }), + }, + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaVectorStoreFileGet(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.VectorStores.Files.Get( + context.TODO(), + "vs_abc123", + "file-abc123", + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaVectorStoreFileListWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if 
!testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.VectorStores.Files.List( + context.TODO(), + "vector_store_id", + openai.BetaVectorStoreFileListParams{ + After: openai.F("after"), + Before: openai.F("before"), + Filter: openai.F(openai.BetaVectorStoreFileListParamsFilterInProgress), + Limit: openai.F(int64(0)), + Order: openai.F(openai.BetaVectorStoreFileListParamsOrderAsc), + }, + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaVectorStoreFileDelete(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.VectorStores.Files.Delete( + context.TODO(), + "vector_store_id", + "file_id", + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/betavectorstorefilebatch.go b/betavectorstorefilebatch.go new file mode 100644 index 0000000..12047a4 --- /dev/null +++ b/betavectorstorefilebatch.go @@ -0,0 +1,415 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
package openai

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"net/url"

	"github.com/openai/openai-go/internal/apijson"
	"github.com/openai/openai-go/internal/apiquery"
	"github.com/openai/openai-go/internal/pagination"
	"github.com/openai/openai-go/internal/param"
	"github.com/openai/openai-go/internal/requestconfig"
	"github.com/openai/openai-go/option"
)

// BetaVectorStoreFileBatchService contains methods and other services that help
// with interacting with the openai API.
//
// Note, unlike clients, this service does not read variables from the environment
// automatically. You should not instantiate this service directly, and instead use
// the [NewBetaVectorStoreFileBatchService] method instead.
type BetaVectorStoreFileBatchService struct {
	Options []option.RequestOption
}

// NewBetaVectorStoreFileBatchService generates a new service that applies the
// given options to each request. These options are applied after the parent
// client's options (if there is one), and before any request-specific options.
func NewBetaVectorStoreFileBatchService(opts ...option.RequestOption) (r *BetaVectorStoreFileBatchService) {
	r = &BetaVectorStoreFileBatchService{}
	r.Options = opts
	return
}

// New creates a vector store file batch. vectorStoreID must be non-empty; it
// is interpolated into the request path.
func (r *BetaVectorStoreFileBatchService) New(ctx context.Context, vectorStoreID string, body BetaVectorStoreFileBatchNewParams, opts ...option.RequestOption) (res *VectorStoreFileBatch, err error) {
	// Service-level options first, then per-call options so the latter win.
	opts = append(r.Options[:], opts...)
	if vectorStoreID == "" {
		err = errors.New("missing required vector_store_id parameter")
		return
	}
	path := fmt.Sprintf("vector_stores/%s/file_batches", vectorStoreID)
	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...)
	return
}

// Get retrieves a vector store file batch. Both path parameters must be
// non-empty.
func (r *BetaVectorStoreFileBatchService) Get(ctx context.Context, vectorStoreID string, batchID string, opts ...option.RequestOption) (res *VectorStoreFileBatch, err error) {
	opts = append(r.Options[:], opts...)
	if vectorStoreID == "" {
		err = errors.New("missing required vector_store_id parameter")
		return
	}
	if batchID == "" {
		err = errors.New("missing required batch_id parameter")
		return
	}
	path := fmt.Sprintf("vector_stores/%s/file_batches/%s", vectorStoreID, batchID)
	err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...)
	return
}

// Cancel a vector store file batch. This attempts to cancel the processing of
// files in this batch as soon as possible.
func (r *BetaVectorStoreFileBatchService) Cancel(ctx context.Context, vectorStoreID string, batchID string, opts ...option.RequestOption) (res *VectorStoreFileBatch, err error) {
	opts = append(r.Options[:], opts...)
	if vectorStoreID == "" {
		err = errors.New("missing required vector_store_id parameter")
		return
	}
	if batchID == "" {
		err = errors.New("missing required batch_id parameter")
		return
	}
	path := fmt.Sprintf("vector_stores/%s/file_batches/%s/cancel", vectorStoreID, batchID)
	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, nil, &res, opts...)
	return
}

// ListFiles returns a page of vector store files in a batch. Page through the
// rest with the returned CursorPage, or use [ListFilesAutoPaging].
func (r *BetaVectorStoreFileBatchService) ListFiles(ctx context.Context, vectorStoreID string, batchID string, query BetaVectorStoreFileBatchListFilesParams, opts ...option.RequestOption) (res *pagination.CursorPage[VectorStoreFile], err error) {
	var raw *http.Response
	opts = append(r.Options[:], opts...)
	// Capture the raw *http.Response so pagination state can be derived from
	// it via res.SetPageConfig below.
	opts = append([]option.RequestOption{option.WithResponseInto(&raw)}, opts...)
	if vectorStoreID == "" {
		err = errors.New("missing required vector_store_id parameter")
		return
	}
	if batchID == "" {
		err = errors.New("missing required batch_id parameter")
		return
	}
	path := fmt.Sprintf("vector_stores/%s/file_batches/%s/files", vectorStoreID, batchID)
	cfg, err := requestconfig.NewRequestConfig(ctx, http.MethodGet, path, query, &res, opts...)
	if err != nil {
		return nil, err
	}
	err = cfg.Execute()
	if err != nil {
		return nil, err
	}
	res.SetPageConfig(cfg, raw)
	return res, nil
}

// ListFilesAutoPaging returns an auto-paging iterator over the vector store
// files in a batch; it fetches pages lazily as the iterator advances.
func (r *BetaVectorStoreFileBatchService) ListFilesAutoPaging(ctx context.Context, vectorStoreID string, batchID string, query BetaVectorStoreFileBatchListFilesParams, opts ...option.RequestOption) *pagination.CursorPageAutoPager[VectorStoreFile] {
	return pagination.NewCursorPageAutoPager(r.ListFiles(ctx, vectorStoreID, batchID, query, opts...))
}

// A batch of files attached to a vector store.
type VectorStoreFileBatch struct {
	// The identifier, which can be referenced in API endpoints.
	ID string `json:"id,required"`
	// The Unix timestamp (in seconds) for when the vector store files batch was
	// created.
	CreatedAt int64 `json:"created_at,required"`
	// Per-status counts of the files in this batch.
	FileCounts VectorStoreFileBatchFileCounts `json:"file_counts,required"`
	// The object type, which is always `vector_store.file_batch`.
	Object VectorStoreFileBatchObject `json:"object,required"`
	// The status of the vector store files batch, which can be either `in_progress`,
	// `completed`, `cancelled` or `failed`.
	Status VectorStoreFileBatchStatus `json:"status,required"`
	// The ID of the
	// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
	// that the [File](https://platform.openai.com/docs/api-reference/files) is
	// attached to.
	VectorStoreID string                   `json:"vector_store_id,required"`
	JSON          vectorStoreFileBatchJSON `json:"-"`
}

// vectorStoreFileBatchJSON contains the JSON metadata for the struct
// [VectorStoreFileBatch]
type vectorStoreFileBatchJSON struct {
	ID            apijson.Field
	CreatedAt     apijson.Field
	FileCounts    apijson.Field
	Object        apijson.Field
	Status        apijson.Field
	VectorStoreID apijson.Field
	raw           string
	ExtraFields   map[string]apijson.Field
}

// UnmarshalJSON decodes into r using the SDK's JSON machinery.
func (r *VectorStoreFileBatch) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the original JSON this value was decoded from.
func (r vectorStoreFileBatchJSON) RawJSON() string {
	return r.raw
}

// VectorStoreFileBatchFileCounts breaks down the batch's files by processing
// state.
type VectorStoreFileBatchFileCounts struct {
	// The number of files that where cancelled.
	Cancelled int64 `json:"cancelled,required"`
	// The number of files that have been processed.
	Completed int64 `json:"completed,required"`
	// The number of files that have failed to process.
	Failed int64 `json:"failed,required"`
	// The number of files that are currently being processed.
	InProgress int64 `json:"in_progress,required"`
	// The total number of files.
	Total int64                              `json:"total,required"`
	JSON  vectorStoreFileBatchFileCountsJSON `json:"-"`
}

// vectorStoreFileBatchFileCountsJSON contains the JSON metadata for the struct
// [VectorStoreFileBatchFileCounts]
type vectorStoreFileBatchFileCountsJSON struct {
	Cancelled   apijson.Field
	Completed   apijson.Field
	Failed      apijson.Field
	InProgress  apijson.Field
	Total       apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

// UnmarshalJSON decodes into r using the SDK's JSON machinery.
func (r *VectorStoreFileBatchFileCounts) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the original JSON this value was decoded from.
func (r vectorStoreFileBatchFileCountsJSON) RawJSON() string {
	return r.raw
}

// The object type, which is always `vector_store.file_batch`.
type VectorStoreFileBatchObject string

const (
	VectorStoreFileBatchObjectVectorStoreFilesBatch VectorStoreFileBatchObject = "vector_store.files_batch"
)

// IsKnown reports whether r is a value known to this version of the SDK.
func (r VectorStoreFileBatchObject) IsKnown() bool {
	switch r {
	case VectorStoreFileBatchObjectVectorStoreFilesBatch:
		return true
	}
	return false
}

// The status of the vector store files batch, which can be either `in_progress`,
// `completed`, `cancelled` or `failed`.
type VectorStoreFileBatchStatus string

const (
	VectorStoreFileBatchStatusInProgress VectorStoreFileBatchStatus = "in_progress"
	VectorStoreFileBatchStatusCompleted  VectorStoreFileBatchStatus = "completed"
	VectorStoreFileBatchStatusCancelled  VectorStoreFileBatchStatus = "cancelled"
	VectorStoreFileBatchStatusFailed     VectorStoreFileBatchStatus = "failed"
)

// IsKnown reports whether r is a value known to this version of the SDK.
func (r VectorStoreFileBatchStatus) IsKnown() bool {
	switch r {
	case VectorStoreFileBatchStatusInProgress, VectorStoreFileBatchStatusCompleted, VectorStoreFileBatchStatusCancelled, VectorStoreFileBatchStatusFailed:
		return true
	}
	return false
}

// BetaVectorStoreFileBatchNewParams is the request body for creating a vector
// store file batch.
type BetaVectorStoreFileBatchNewParams struct {
	// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
	// the vector store should use. Useful for tools like `file_search` that can access
	// files.
	FileIDs param.Field[[]string] `json:"file_ids,required"`
	// The chunking strategy used to chunk the file(s). If not set, will use the `auto`
	// strategy.
	ChunkingStrategy param.Field[BetaVectorStoreFileBatchNewParamsChunkingStrategyUnion] `json:"chunking_strategy"`
}

// MarshalJSON serializes the params via apijson.MarshalRoot.
func (r BetaVectorStoreFileBatchNewParams) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

// The chunking strategy used to chunk the file(s). If not set, will use the `auto`
// strategy.
//
// This is the permissive, union-shaped form carrying every field any variant
// can have; prefer the concrete auto/static request param types below.
type BetaVectorStoreFileBatchNewParamsChunkingStrategy struct {
	// Discriminator: `auto` or `static`.
	Type param.Field[BetaVectorStoreFileBatchNewParamsChunkingStrategyType] `json:"type,required"`
	// Only meaningful when Type is `static`; see the static request param type.
	Static param.Field[interface{}] `json:"static,required"`
}

// MarshalJSON serializes the params via apijson.MarshalRoot.
func (r BetaVectorStoreFileBatchNewParamsChunkingStrategy) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

func (r BetaVectorStoreFileBatchNewParamsChunkingStrategy) implementsBetaVectorStoreFileBatchNewParamsChunkingStrategyUnion() {
}

// The chunking strategy used to chunk the file(s). If not set, will use the `auto`
// strategy.
//
// Satisfied by
// [BetaVectorStoreFileBatchNewParamsChunkingStrategyAutoChunkingStrategyRequestParam],
// [BetaVectorStoreFileBatchNewParamsChunkingStrategyStaticChunkingStrategyRequestParam],
// [BetaVectorStoreFileBatchNewParamsChunkingStrategy].
type BetaVectorStoreFileBatchNewParamsChunkingStrategyUnion interface {
	implementsBetaVectorStoreFileBatchNewParamsChunkingStrategyUnion()
}

// The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
// `800` and `chunk_overlap_tokens` of `400`.
type BetaVectorStoreFileBatchNewParamsChunkingStrategyAutoChunkingStrategyRequestParam struct {
	// Always `auto`.
	Type param.Field[BetaVectorStoreFileBatchNewParamsChunkingStrategyAutoChunkingStrategyRequestParamType] `json:"type,required"`
}

// MarshalJSON serializes the params via apijson.MarshalRoot.
func (r BetaVectorStoreFileBatchNewParamsChunkingStrategyAutoChunkingStrategyRequestParam) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

func (r BetaVectorStoreFileBatchNewParamsChunkingStrategyAutoChunkingStrategyRequestParam) implementsBetaVectorStoreFileBatchNewParamsChunkingStrategyUnion() {
}

// Always `auto`.
+type BetaVectorStoreFileBatchNewParamsChunkingStrategyAutoChunkingStrategyRequestParamType string + +const ( + BetaVectorStoreFileBatchNewParamsChunkingStrategyAutoChunkingStrategyRequestParamTypeAuto BetaVectorStoreFileBatchNewParamsChunkingStrategyAutoChunkingStrategyRequestParamType = "auto" +) + +func (r BetaVectorStoreFileBatchNewParamsChunkingStrategyAutoChunkingStrategyRequestParamType) IsKnown() bool { + switch r { + case BetaVectorStoreFileBatchNewParamsChunkingStrategyAutoChunkingStrategyRequestParamTypeAuto: + return true + } + return false +} + +type BetaVectorStoreFileBatchNewParamsChunkingStrategyStaticChunkingStrategyRequestParam struct { + Static param.Field[BetaVectorStoreFileBatchNewParamsChunkingStrategyStaticChunkingStrategyRequestParamStatic] `json:"static,required"` + // Always `static`. + Type param.Field[BetaVectorStoreFileBatchNewParamsChunkingStrategyStaticChunkingStrategyRequestParamType] `json:"type,required"` +} + +func (r BetaVectorStoreFileBatchNewParamsChunkingStrategyStaticChunkingStrategyRequestParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r BetaVectorStoreFileBatchNewParamsChunkingStrategyStaticChunkingStrategyRequestParam) implementsBetaVectorStoreFileBatchNewParamsChunkingStrategyUnion() { +} + +type BetaVectorStoreFileBatchNewParamsChunkingStrategyStaticChunkingStrategyRequestParamStatic struct { + // The number of tokens that overlap between chunks. The default value is `400`. + // + // Note that the overlap must not exceed half of `max_chunk_size_tokens`. + ChunkOverlapTokens param.Field[int64] `json:"chunk_overlap_tokens,required"` + // The maximum number of tokens in each chunk. The default value is `800`. The + // minimum value is `100` and the maximum value is `4096`. 
+ MaxChunkSizeTokens param.Field[int64] `json:"max_chunk_size_tokens,required"` +} + +func (r BetaVectorStoreFileBatchNewParamsChunkingStrategyStaticChunkingStrategyRequestParamStatic) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// Always `static`. +type BetaVectorStoreFileBatchNewParamsChunkingStrategyStaticChunkingStrategyRequestParamType string + +const ( + BetaVectorStoreFileBatchNewParamsChunkingStrategyStaticChunkingStrategyRequestParamTypeStatic BetaVectorStoreFileBatchNewParamsChunkingStrategyStaticChunkingStrategyRequestParamType = "static" +) + +func (r BetaVectorStoreFileBatchNewParamsChunkingStrategyStaticChunkingStrategyRequestParamType) IsKnown() bool { + switch r { + case BetaVectorStoreFileBatchNewParamsChunkingStrategyStaticChunkingStrategyRequestParamTypeStatic: + return true + } + return false +} + +// Always `auto`. +type BetaVectorStoreFileBatchNewParamsChunkingStrategyType string + +const ( + BetaVectorStoreFileBatchNewParamsChunkingStrategyTypeAuto BetaVectorStoreFileBatchNewParamsChunkingStrategyType = "auto" + BetaVectorStoreFileBatchNewParamsChunkingStrategyTypeStatic BetaVectorStoreFileBatchNewParamsChunkingStrategyType = "static" +) + +func (r BetaVectorStoreFileBatchNewParamsChunkingStrategyType) IsKnown() bool { + switch r { + case BetaVectorStoreFileBatchNewParamsChunkingStrategyTypeAuto, BetaVectorStoreFileBatchNewParamsChunkingStrategyTypeStatic: + return true + } + return false +} + +type BetaVectorStoreFileBatchListFilesParams struct { + // A cursor for use in pagination. `after` is an object ID that defines your place + // in the list. For instance, if you make a list request and receive 100 objects, + // ending with obj_foo, your subsequent call can include after=obj_foo in order to + // fetch the next page of the list. + After param.Field[string] `query:"after"` + // A cursor for use in pagination. `before` is an object ID that defines your place + // in the list. 
For instance, if you make a list request and receive 100 objects, + // ending with obj_foo, your subsequent call can include before=obj_foo in order to + // fetch the previous page of the list. + Before param.Field[string] `query:"before"` + // Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. + Filter param.Field[BetaVectorStoreFileBatchListFilesParamsFilter] `query:"filter"` + // A limit on the number of objects to be returned. Limit can range between 1 and + // 100, and the default is 20. + Limit param.Field[int64] `query:"limit"` + // Sort order by the `created_at` timestamp of the objects. `asc` for ascending + // order and `desc` for descending order. + Order param.Field[BetaVectorStoreFileBatchListFilesParamsOrder] `query:"order"` +} + +// URLQuery serializes [BetaVectorStoreFileBatchListFilesParams]'s query parameters +// as `url.Values`. +func (r BetaVectorStoreFileBatchListFilesParams) URLQuery() (v url.Values) { + return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ + ArrayFormat: apiquery.ArrayQueryFormatComma, + NestedFormat: apiquery.NestedQueryFormatBrackets, + }) +} + +// Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. 
type BetaVectorStoreFileBatchListFilesParamsFilter string

const (
	BetaVectorStoreFileBatchListFilesParamsFilterInProgress BetaVectorStoreFileBatchListFilesParamsFilter = "in_progress"
	BetaVectorStoreFileBatchListFilesParamsFilterCompleted  BetaVectorStoreFileBatchListFilesParamsFilter = "completed"
	BetaVectorStoreFileBatchListFilesParamsFilterFailed     BetaVectorStoreFileBatchListFilesParamsFilter = "failed"
	BetaVectorStoreFileBatchListFilesParamsFilterCancelled  BetaVectorStoreFileBatchListFilesParamsFilter = "cancelled"
)

// IsKnown reports whether r is one of the filter values declared above; it
// returns false for values introduced server-side after this SDK was generated.
func (r BetaVectorStoreFileBatchListFilesParamsFilter) IsKnown() bool {
	for _, known := range [...]BetaVectorStoreFileBatchListFilesParamsFilter{
		BetaVectorStoreFileBatchListFilesParamsFilterInProgress,
		BetaVectorStoreFileBatchListFilesParamsFilterCompleted,
		BetaVectorStoreFileBatchListFilesParamsFilterFailed,
		BetaVectorStoreFileBatchListFilesParamsFilterCancelled,
	} {
		if r == known {
			return true
		}
	}
	return false
}

// Sort order by the `created_at` timestamp of the objects. `asc` for ascending
// order and `desc` for descending order.
type BetaVectorStoreFileBatchListFilesParamsOrder string

const (
	BetaVectorStoreFileBatchListFilesParamsOrderAsc  BetaVectorStoreFileBatchListFilesParamsOrder = "asc"
	BetaVectorStoreFileBatchListFilesParamsOrderDesc BetaVectorStoreFileBatchListFilesParamsOrder = "desc"
)

// IsKnown reports whether r is one of the sort orders declared above.
func (r BetaVectorStoreFileBatchListFilesParamsOrder) IsKnown() bool {
	return r == BetaVectorStoreFileBatchListFilesParamsOrderAsc || r == BetaVectorStoreFileBatchListFilesParamsOrderDesc
}

// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+ +package openai_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" +) + +func TestBetaVectorStoreFileBatchNewWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.VectorStores.FileBatches.New( + context.TODO(), + "vs_abc123", + openai.BetaVectorStoreFileBatchNewParams{ + FileIDs: openai.F([]string{"string"}), + ChunkingStrategy: openai.F[openai.BetaVectorStoreFileBatchNewParamsChunkingStrategyUnion](openai.BetaVectorStoreFileBatchNewParamsChunkingStrategyAutoChunkingStrategyRequestParam{ + Type: openai.F(openai.BetaVectorStoreFileBatchNewParamsChunkingStrategyAutoChunkingStrategyRequestParamTypeAuto), + }), + }, + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaVectorStoreFileBatchGet(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.VectorStores.FileBatches.Get( + context.TODO(), + "vs_abc123", + "vsfb_abc123", + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaVectorStoreFileBatchCancel(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + 
baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.VectorStores.FileBatches.Cancel( + context.TODO(), + "vector_store_id", + "batch_id", + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBetaVectorStoreFileBatchListFilesWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Beta.VectorStores.FileBatches.ListFiles( + context.TODO(), + "vector_store_id", + "batch_id", + openai.BetaVectorStoreFileBatchListFilesParams{ + After: openai.F("after"), + Before: openai.F("before"), + Filter: openai.F(openai.BetaVectorStoreFileBatchListFilesParamsFilterInProgress), + Limit: openai.F(int64(0)), + Order: openai.F(openai.BetaVectorStoreFileBatchListFilesParamsOrderAsc), + }, + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/chat.go b/chat.go new file mode 100644 index 0000000..999df42 --- /dev/null +++ b/chat.go @@ -0,0 +1,64 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai + +import ( + "github.com/openai/openai-go/option" +) + +// ChatService contains methods and other services that help with interacting with +// the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. 
// You should not instantiate this service directly; use the
// [NewChatService] method instead.
type ChatService struct {
	Options     []option.RequestOption
	Completions *ChatCompletionService
}

// NewChatService generates a new service that applies the given options to each
// request. These options are applied after the parent client's options (if there
// is one), and before any request-specific options.
func NewChatService(opts ...option.RequestOption) (r *ChatService) {
	r = &ChatService{}
	r.Options = opts
	r.Completions = NewChatCompletionService(opts...)
	return
}

// ChatModel enumerates the chat model names known to this SDK at generation
// time; the API may accept names not listed here.
type ChatModel string

const (
	ChatModelGPT4o               ChatModel = "gpt-4o"
	ChatModelGPT4o2024_05_13     ChatModel = "gpt-4o-2024-05-13"
	ChatModelGPT4oMini           ChatModel = "gpt-4o-mini"
	ChatModelGPT4oMini2024_07_18 ChatModel = "gpt-4o-mini-2024-07-18"
	ChatModelGPT4Turbo           ChatModel = "gpt-4-turbo"
	ChatModelGPT4Turbo2024_04_09 ChatModel = "gpt-4-turbo-2024-04-09"
	ChatModelGPT4_0125Preview    ChatModel = "gpt-4-0125-preview"
	ChatModelGPT4TurboPreview    ChatModel = "gpt-4-turbo-preview"
	ChatModelGPT4_1106Preview    ChatModel = "gpt-4-1106-preview"
	ChatModelGPT4VisionPreview   ChatModel = "gpt-4-vision-preview"
	ChatModelGPT4                ChatModel = "gpt-4"
	ChatModelGPT4_0314           ChatModel = "gpt-4-0314"
	ChatModelGPT4_0613           ChatModel = "gpt-4-0613"
	ChatModelGPT4_32k            ChatModel = "gpt-4-32k"
	ChatModelGPT4_32k0314        ChatModel = "gpt-4-32k-0314"
	ChatModelGPT4_32k0613        ChatModel = "gpt-4-32k-0613"
	ChatModelGPT3_5Turbo         ChatModel = "gpt-3.5-turbo"
	ChatModelGPT3_5Turbo16k      ChatModel = "gpt-3.5-turbo-16k"
	ChatModelGPT3_5Turbo0301     ChatModel = "gpt-3.5-turbo-0301"
	ChatModelGPT3_5Turbo0613     ChatModel = "gpt-3.5-turbo-0613"
	ChatModelGPT3_5Turbo1106     ChatModel = "gpt-3.5-turbo-1106"
	ChatModelGPT3_5Turbo0125     ChatModel = "gpt-3.5-turbo-0125"
	ChatModelGPT3_5Turbo16k0613  ChatModel = "gpt-3.5-turbo-16k-0613"
)

// IsKnown reports whether r is one of the model names declared above.
func (r ChatModel) IsKnown() bool {
	switch r {
	case ChatModelGPT4o, ChatModelGPT4o2024_05_13, ChatModelGPT4oMini, ChatModelGPT4oMini2024_07_18, ChatModelGPT4Turbo, ChatModelGPT4Turbo2024_04_09, ChatModelGPT4_0125Preview, ChatModelGPT4TurboPreview, ChatModelGPT4_1106Preview, ChatModelGPT4VisionPreview, ChatModelGPT4, ChatModelGPT4_0314, ChatModelGPT4_0613, ChatModelGPT4_32k, ChatModelGPT4_32k0314, ChatModelGPT4_32k0613, ChatModelGPT3_5Turbo, ChatModelGPT3_5Turbo16k, ChatModelGPT3_5Turbo0301, ChatModelGPT3_5Turbo0613, ChatModelGPT3_5Turbo1106, ChatModelGPT3_5Turbo0125, ChatModelGPT3_5Turbo16k0613:
		return true
	}
	return false
}

// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

package openai

import (
	"context"
	"net/http"

	"github.com/openai/openai-go/internal/apijson"
	"github.com/openai/openai-go/internal/param"
	"github.com/openai/openai-go/internal/requestconfig"
	"github.com/openai/openai-go/option"
	"github.com/openai/openai-go/packages/ssestream"
	"github.com/openai/openai-go/shared"
)

// ChatCompletionService contains methods and other services that help with
// interacting with the openai API.
//
// Note, unlike clients, this service does not read variables from the environment
// automatically. You should not instantiate this service directly; use the
// [NewChatCompletionService] method instead.
type ChatCompletionService struct {
	Options []option.RequestOption
}

// NewChatCompletionService generates a new service that applies the given options
// to each request. These options are applied after the parent client's options (if
// there is one), and before any request-specific options.
+func NewChatCompletionService(opts ...option.RequestOption) (r *ChatCompletionService) { + r = &ChatCompletionService{} + r.Options = opts + return +} + +// Creates a model response for the given chat conversation. +func (r *ChatCompletionService) New(ctx context.Context, body ChatCompletionNewParams, opts ...option.RequestOption) (res *ChatCompletion, err error) { + opts = append(r.Options[:], opts...) + path := "chat/completions" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Creates a model response for the given chat conversation. +func (r *ChatCompletionService) NewStreaming(ctx context.Context, body ChatCompletionNewParams, opts ...option.RequestOption) (stream *ssestream.Stream[ChatCompletionChunk]) { + var ( + raw *http.Response + err error + ) + opts = append(r.Options[:], opts...) + opts = append([]option.RequestOption{option.WithJSONSet("stream", true)}, opts...) + path := "chat/completions" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &raw, opts...) + return ssestream.NewStream[ChatCompletionChunk](ssestream.NewDecoder(raw), err) +} + +// Represents a chat completion response returned by model, based on the provided +// input. +type ChatCompletion struct { + // A unique identifier for the chat completion. + ID string `json:"id,required"` + // A list of chat completion choices. Can be more than one if `n` is greater + // than 1. + Choices []ChatCompletionChoice `json:"choices,required"` + // The Unix timestamp (in seconds) of when the chat completion was created. + Created int64 `json:"created,required"` + // The model used for the chat completion. + Model string `json:"model,required"` + // The object type, which is always `chat.completion`. + Object ChatCompletionObject `json:"object,required"` + // The service tier used for processing the request. This field is only included if + // the `service_tier` parameter is specified in the request. 
+ ServiceTier ChatCompletionServiceTier `json:"service_tier,nullable"` + // This fingerprint represents the backend configuration that the model runs with. + // + // Can be used in conjunction with the `seed` request parameter to understand when + // backend changes have been made that might impact determinism. + SystemFingerprint string `json:"system_fingerprint"` + // Usage statistics for the completion request. + Usage CompletionUsage `json:"usage"` + JSON chatCompletionJSON `json:"-"` +} + +// chatCompletionJSON contains the JSON metadata for the struct [ChatCompletion] +type chatCompletionJSON struct { + ID apijson.Field + Choices apijson.Field + Created apijson.Field + Model apijson.Field + Object apijson.Field + ServiceTier apijson.Field + SystemFingerprint apijson.Field + Usage apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ChatCompletion) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r chatCompletionJSON) RawJSON() string { + return r.raw +} + +type ChatCompletionChoice struct { + // The reason the model stopped generating tokens. This will be `stop` if the model + // hit a natural stop point or a provided stop sequence, `length` if the maximum + // number of tokens specified in the request was reached, `content_filter` if + // content was omitted due to a flag from our content filters, `tool_calls` if the + // model called a tool, or `function_call` (deprecated) if the model called a + // function. + FinishReason ChatCompletionChoicesFinishReason `json:"finish_reason,required"` + // The index of the choice in the list of choices. + Index int64 `json:"index,required"` + // Log probability information for the choice. + Logprobs ChatCompletionChoicesLogprobs `json:"logprobs,required,nullable"` + // A chat completion message generated by the model. 
+ Message ChatCompletionMessage `json:"message,required"` + JSON chatCompletionChoiceJSON `json:"-"` +} + +// chatCompletionChoiceJSON contains the JSON metadata for the struct +// [ChatCompletionChoice] +type chatCompletionChoiceJSON struct { + FinishReason apijson.Field + Index apijson.Field + Logprobs apijson.Field + Message apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ChatCompletionChoice) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r chatCompletionChoiceJSON) RawJSON() string { + return r.raw +} + +// The reason the model stopped generating tokens. This will be `stop` if the model +// hit a natural stop point or a provided stop sequence, `length` if the maximum +// number of tokens specified in the request was reached, `content_filter` if +// content was omitted due to a flag from our content filters, `tool_calls` if the +// model called a tool, or `function_call` (deprecated) if the model called a +// function. +type ChatCompletionChoicesFinishReason string + +const ( + ChatCompletionChoicesFinishReasonStop ChatCompletionChoicesFinishReason = "stop" + ChatCompletionChoicesFinishReasonLength ChatCompletionChoicesFinishReason = "length" + ChatCompletionChoicesFinishReasonToolCalls ChatCompletionChoicesFinishReason = "tool_calls" + ChatCompletionChoicesFinishReasonContentFilter ChatCompletionChoicesFinishReason = "content_filter" + ChatCompletionChoicesFinishReasonFunctionCall ChatCompletionChoicesFinishReason = "function_call" +) + +func (r ChatCompletionChoicesFinishReason) IsKnown() bool { + switch r { + case ChatCompletionChoicesFinishReasonStop, ChatCompletionChoicesFinishReasonLength, ChatCompletionChoicesFinishReasonToolCalls, ChatCompletionChoicesFinishReasonContentFilter, ChatCompletionChoicesFinishReasonFunctionCall: + return true + } + return false +} + +// Log probability information for the choice. 
type ChatCompletionChoicesLogprobs struct {
	// A list of message content tokens with log probability information.
	Content []ChatCompletionTokenLogprob `json:"content,required,nullable"`
	JSON    chatCompletionChoicesLogprobsJSON `json:"-"`
}

// chatCompletionChoicesLogprobsJSON contains the JSON metadata for the struct
// [ChatCompletionChoicesLogprobs]
type chatCompletionChoicesLogprobsJSON struct {
	Content     apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

// UnmarshalJSON decodes data into r, recording unknown keys in ExtraFields.
func (r *ChatCompletionChoicesLogprobs) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the raw bytes this struct was decoded from.
func (r chatCompletionChoicesLogprobsJSON) RawJSON() string {
	return r.raw
}

// The object type, which is always `chat.completion`.
type ChatCompletionObject string

const (
	ChatCompletionObjectChatCompletion ChatCompletionObject = "chat.completion"
)

// IsKnown reports whether r is one of the values declared above.
func (r ChatCompletionObject) IsKnown() bool {
	switch r {
	case ChatCompletionObjectChatCompletion:
		return true
	}
	return false
}

// The service tier used for processing the request. This field is only included if
// the `service_tier` parameter is specified in the request.
type ChatCompletionServiceTier string

const (
	ChatCompletionServiceTierScale   ChatCompletionServiceTier = "scale"
	ChatCompletionServiceTierDefault ChatCompletionServiceTier = "default"
)

// IsKnown reports whether r is one of the service tiers declared above.
func (r ChatCompletionServiceTier) IsKnown() bool {
	switch r {
	case ChatCompletionServiceTierScale, ChatCompletionServiceTierDefault:
		return true
	}
	return false
}

type ChatCompletionAssistantMessageParam struct {
	// The role of the messages author, in this case `assistant`.
	Role param.Field[ChatCompletionAssistantMessageParamRole] `json:"role,required"`
	// The contents of the assistant message. Required unless `tool_calls` or
	// `function_call` is specified.
	Content param.Field[string] `json:"content"`
	// Deprecated and replaced by `tool_calls`. The name and arguments of a function
	// that should be called, as generated by the model.
	FunctionCall param.Field[ChatCompletionAssistantMessageParamFunctionCall] `json:"function_call"`
	// An optional name for the participant. Provides the model information to
	// differentiate between participants of the same role.
	Name param.Field[string] `json:"name"`
	// The tool calls generated by the model, such as function calls.
	ToolCalls param.Field[[]ChatCompletionMessageToolCallParam] `json:"tool_calls"`
}

func (r ChatCompletionAssistantMessageParam) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

// Marks this param struct as a member of the chat message union.
func (r ChatCompletionAssistantMessageParam) implementsChatCompletionMessageParamUnion() {}

// The role of the messages author, in this case `assistant`.
type ChatCompletionAssistantMessageParamRole string

const (
	ChatCompletionAssistantMessageParamRoleAssistant ChatCompletionAssistantMessageParamRole = "assistant"
)

// IsKnown reports whether r is one of the values declared above.
func (r ChatCompletionAssistantMessageParamRole) IsKnown() bool {
	switch r {
	case ChatCompletionAssistantMessageParamRoleAssistant:
		return true
	}
	return false
}

// Deprecated and replaced by `tool_calls`. The name and arguments of a function
// that should be called, as generated by the model.
type ChatCompletionAssistantMessageParamFunctionCall struct {
	// The arguments to call the function with, as generated by the model in JSON
	// format. Note that the model does not always generate valid JSON, and may
	// hallucinate parameters not defined by your function schema. Validate the
	// arguments in your code before calling your function.
	Arguments param.Field[string] `json:"arguments,required"`
	// The name of the function to call.
	Name param.Field[string] `json:"name,required"`
}

func (r ChatCompletionAssistantMessageParamFunctionCall) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}

// Represents a streamed chunk of a chat completion response returned by model,
// based on the provided input.
type ChatCompletionChunk struct {
	// A unique identifier for the chat completion. Each chunk has the same ID.
	ID string `json:"id,required"`
	// A list of chat completion choices. Can contain more than one elements if `n` is
	// greater than 1. Can also be empty for the last chunk if you set
	// `stream_options: {"include_usage": true}`.
	Choices []ChatCompletionChunkChoice `json:"choices,required"`
	// The Unix timestamp (in seconds) of when the chat completion was created. Each
	// chunk has the same timestamp.
	Created int64 `json:"created,required"`
	// The model to generate the completion.
	Model string `json:"model,required"`
	// The object type, which is always `chat.completion.chunk`.
	Object ChatCompletionChunkObject `json:"object,required"`
	// The service tier used for processing the request. This field is only included if
	// the `service_tier` parameter is specified in the request.
	ServiceTier ChatCompletionChunkServiceTier `json:"service_tier,nullable"`
	// This fingerprint represents the backend configuration that the model runs with.
	// Can be used in conjunction with the `seed` request parameter to understand when
	// backend changes have been made that might impact determinism.
	SystemFingerprint string `json:"system_fingerprint"`
	// An optional field that will only be present when you set
	// `stream_options: {"include_usage": true}` in your request. When present, it
	// contains a null value except for the last chunk which contains the token usage
	// statistics for the entire request.
	Usage CompletionUsage `json:"usage"`
	JSON  chatCompletionChunkJSON `json:"-"`
}

// chatCompletionChunkJSON contains the JSON metadata for the struct
// [ChatCompletionChunk]
type chatCompletionChunkJSON struct {
	ID                apijson.Field
	Choices           apijson.Field
	Created           apijson.Field
	Model             apijson.Field
	Object            apijson.Field
	ServiceTier       apijson.Field
	SystemFingerprint apijson.Field
	Usage             apijson.Field
	raw               string
	ExtraFields       map[string]apijson.Field
}

// UnmarshalJSON decodes data into r, recording unknown keys in ExtraFields.
func (r *ChatCompletionChunk) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the raw bytes this struct was decoded from.
func (r chatCompletionChunkJSON) RawJSON() string {
	return r.raw
}

type ChatCompletionChunkChoice struct {
	// A chat completion delta generated by streamed model responses.
	Delta ChatCompletionChunkChoicesDelta `json:"delta,required"`
	// The reason the model stopped generating tokens. This will be `stop` if the model
	// hit a natural stop point or a provided stop sequence, `length` if the maximum
	// number of tokens specified in the request was reached, `content_filter` if
	// content was omitted due to a flag from our content filters, `tool_calls` if the
	// model called a tool, or `function_call` (deprecated) if the model called a
	// function.
	FinishReason ChatCompletionChunkChoicesFinishReason `json:"finish_reason,required,nullable"`
	// The index of the choice in the list of choices.
	Index int64 `json:"index,required"`
	// Log probability information for the choice.
	Logprobs ChatCompletionChunkChoicesLogprobs `json:"logprobs,nullable"`
	JSON     chatCompletionChunkChoiceJSON `json:"-"`
}

// chatCompletionChunkChoiceJSON contains the JSON metadata for the struct
// [ChatCompletionChunkChoice]
type chatCompletionChunkChoiceJSON struct {
	Delta        apijson.Field
	FinishReason apijson.Field
	Index        apijson.Field
	Logprobs     apijson.Field
	raw          string
	ExtraFields  map[string]apijson.Field
}

// UnmarshalJSON decodes data into r, recording unknown keys in ExtraFields.
func (r *ChatCompletionChunkChoice) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the raw bytes this struct was decoded from.
func (r chatCompletionChunkChoiceJSON) RawJSON() string {
	return r.raw
}

// A chat completion delta generated by streamed model responses.
type ChatCompletionChunkChoicesDelta struct {
	// The contents of the chunk message.
	Content string `json:"content,nullable"`
	// Deprecated and replaced by `tool_calls`. The name and arguments of a function
	// that should be called, as generated by the model.
	FunctionCall ChatCompletionChunkChoicesDeltaFunctionCall `json:"function_call"`
	// The role of the author of this message.
	Role      ChatCompletionChunkChoicesDeltaRole       `json:"role"`
	ToolCalls []ChatCompletionChunkChoicesDeltaToolCall `json:"tool_calls"`
	JSON      chatCompletionChunkChoicesDeltaJSON       `json:"-"`
}

// chatCompletionChunkChoicesDeltaJSON contains the JSON metadata for the struct
// [ChatCompletionChunkChoicesDelta]
type chatCompletionChunkChoicesDeltaJSON struct {
	Content      apijson.Field
	FunctionCall apijson.Field
	Role         apijson.Field
	ToolCalls    apijson.Field
	raw          string
	ExtraFields  map[string]apijson.Field
}

// UnmarshalJSON decodes data into r, recording unknown keys in ExtraFields.
func (r *ChatCompletionChunkChoicesDelta) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the raw bytes this struct was decoded from.
func (r chatCompletionChunkChoicesDeltaJSON) RawJSON() string {
	return r.raw
}

// Deprecated and replaced by `tool_calls`. The name and arguments of a function
// that should be called, as generated by the model.
type ChatCompletionChunkChoicesDeltaFunctionCall struct {
	// The arguments to call the function with, as generated by the model in JSON
	// format. Note that the model does not always generate valid JSON, and may
	// hallucinate parameters not defined by your function schema. Validate the
	// arguments in your code before calling your function.
	Arguments string `json:"arguments"`
	// The name of the function to call.
	Name string `json:"name"`
	JSON chatCompletionChunkChoicesDeltaFunctionCallJSON `json:"-"`
}

// chatCompletionChunkChoicesDeltaFunctionCallJSON contains the JSON metadata for
// the struct [ChatCompletionChunkChoicesDeltaFunctionCall]
type chatCompletionChunkChoicesDeltaFunctionCallJSON struct {
	Arguments   apijson.Field
	Name        apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

// UnmarshalJSON decodes data into r, recording unknown keys in ExtraFields.
func (r *ChatCompletionChunkChoicesDeltaFunctionCall) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the raw bytes this struct was decoded from.
func (r chatCompletionChunkChoicesDeltaFunctionCallJSON) RawJSON() string {
	return r.raw
}

// The role of the author of this message.
type ChatCompletionChunkChoicesDeltaRole string

const (
	ChatCompletionChunkChoicesDeltaRoleSystem    ChatCompletionChunkChoicesDeltaRole = "system"
	ChatCompletionChunkChoicesDeltaRoleUser      ChatCompletionChunkChoicesDeltaRole = "user"
	ChatCompletionChunkChoicesDeltaRoleAssistant ChatCompletionChunkChoicesDeltaRole = "assistant"
	ChatCompletionChunkChoicesDeltaRoleTool      ChatCompletionChunkChoicesDeltaRole = "tool"
)

// IsKnown reports whether r is one of the roles declared above.
func (r ChatCompletionChunkChoicesDeltaRole) IsKnown() bool {
	switch r {
	case ChatCompletionChunkChoicesDeltaRoleSystem, ChatCompletionChunkChoicesDeltaRoleUser, ChatCompletionChunkChoicesDeltaRoleAssistant, ChatCompletionChunkChoicesDeltaRoleTool:
		return true
	}
	return false
}

type ChatCompletionChunkChoicesDeltaToolCall struct {
	Index int64 `json:"index,required"`
	// The ID of the tool call.
	ID       string                                           `json:"id"`
	Function ChatCompletionChunkChoicesDeltaToolCallsFunction `json:"function"`
	// The type of the tool. Currently, only `function` is supported.
	Type ChatCompletionChunkChoicesDeltaToolCallsType `json:"type"`
	JSON chatCompletionChunkChoicesDeltaToolCallJSON  `json:"-"`
}

// chatCompletionChunkChoicesDeltaToolCallJSON contains the JSON metadata for the
// struct [ChatCompletionChunkChoicesDeltaToolCall]
type chatCompletionChunkChoicesDeltaToolCallJSON struct {
	Index       apijson.Field
	ID          apijson.Field
	Function    apijson.Field
	Type        apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

// UnmarshalJSON decodes data into r, recording unknown keys in ExtraFields.
func (r *ChatCompletionChunkChoicesDeltaToolCall) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the raw bytes this struct was decoded from.
func (r chatCompletionChunkChoicesDeltaToolCallJSON) RawJSON() string {
	return r.raw
}

type ChatCompletionChunkChoicesDeltaToolCallsFunction struct {
	// The arguments to call the function with, as generated by the model in JSON
	// format. Note that the model does not always generate valid JSON, and may
	// hallucinate parameters not defined by your function schema. Validate the
	// arguments in your code before calling your function.
	Arguments string `json:"arguments"`
	// The name of the function to call.
	Name string                                               `json:"name"`
	JSON chatCompletionChunkChoicesDeltaToolCallsFunctionJSON `json:"-"`
}

// chatCompletionChunkChoicesDeltaToolCallsFunctionJSON contains the JSON metadata
// for the struct [ChatCompletionChunkChoicesDeltaToolCallsFunction]
type chatCompletionChunkChoicesDeltaToolCallsFunctionJSON struct {
	Arguments   apijson.Field
	Name        apijson.Field
	raw         string
	ExtraFields map[string]apijson.Field
}

// UnmarshalJSON decodes data into r, recording unknown keys in ExtraFields.
func (r *ChatCompletionChunkChoicesDeltaToolCallsFunction) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}

// RawJSON returns the raw bytes this struct was decoded from.
func (r chatCompletionChunkChoicesDeltaToolCallsFunctionJSON) RawJSON() string {
	return r.raw
}

// The type of the tool. Currently, only `function` is supported.
type ChatCompletionChunkChoicesDeltaToolCallsType string

const (
	ChatCompletionChunkChoicesDeltaToolCallsTypeFunction ChatCompletionChunkChoicesDeltaToolCallsType = "function"
)

// IsKnown reports whether r is one of the values declared above.
func (r ChatCompletionChunkChoicesDeltaToolCallsType) IsKnown() bool {
	switch r {
	case ChatCompletionChunkChoicesDeltaToolCallsTypeFunction:
		return true
	}
	return false
}

// The reason the model stopped generating tokens. This will be `stop` if the model
// hit a natural stop point or a provided stop sequence, `length` if the maximum
// number of tokens specified in the request was reached, `content_filter` if
// content was omitted due to a flag from our content filters, `tool_calls` if the
// model called a tool, or `function_call` (deprecated) if the model called a
// function.
+type ChatCompletionChunkChoicesFinishReason string + +const ( + ChatCompletionChunkChoicesFinishReasonStop ChatCompletionChunkChoicesFinishReason = "stop" + ChatCompletionChunkChoicesFinishReasonLength ChatCompletionChunkChoicesFinishReason = "length" + ChatCompletionChunkChoicesFinishReasonToolCalls ChatCompletionChunkChoicesFinishReason = "tool_calls" + ChatCompletionChunkChoicesFinishReasonContentFilter ChatCompletionChunkChoicesFinishReason = "content_filter" + ChatCompletionChunkChoicesFinishReasonFunctionCall ChatCompletionChunkChoicesFinishReason = "function_call" +) + +func (r ChatCompletionChunkChoicesFinishReason) IsKnown() bool { + switch r { + case ChatCompletionChunkChoicesFinishReasonStop, ChatCompletionChunkChoicesFinishReasonLength, ChatCompletionChunkChoicesFinishReasonToolCalls, ChatCompletionChunkChoicesFinishReasonContentFilter, ChatCompletionChunkChoicesFinishReasonFunctionCall: + return true + } + return false +} + +// Log probability information for the choice. +type ChatCompletionChunkChoicesLogprobs struct { + // A list of message content tokens with log probability information. + Content []ChatCompletionTokenLogprob `json:"content,required,nullable"` + JSON chatCompletionChunkChoicesLogprobsJSON `json:"-"` +} + +// chatCompletionChunkChoicesLogprobsJSON contains the JSON metadata for the struct +// [ChatCompletionChunkChoicesLogprobs] +type chatCompletionChunkChoicesLogprobsJSON struct { + Content apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ChatCompletionChunkChoicesLogprobs) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r chatCompletionChunkChoicesLogprobsJSON) RawJSON() string { + return r.raw +} + +// The object type, which is always `chat.completion.chunk`. 
+type ChatCompletionChunkObject string + +const ( + ChatCompletionChunkObjectChatCompletionChunk ChatCompletionChunkObject = "chat.completion.chunk" +) + +func (r ChatCompletionChunkObject) IsKnown() bool { + switch r { + case ChatCompletionChunkObjectChatCompletionChunk: + return true + } + return false +} + +// The service tier used for processing the request. This field is only included if +// the `service_tier` parameter is specified in the request. +type ChatCompletionChunkServiceTier string + +const ( + ChatCompletionChunkServiceTierScale ChatCompletionChunkServiceTier = "scale" + ChatCompletionChunkServiceTierDefault ChatCompletionChunkServiceTier = "default" +) + +func (r ChatCompletionChunkServiceTier) IsKnown() bool { + switch r { + case ChatCompletionChunkServiceTierScale, ChatCompletionChunkServiceTierDefault: + return true + } + return false +} + +type ChatCompletionContentPartParam struct { + // The type of the content part. + Type param.Field[ChatCompletionContentPartType] `json:"type,required"` + // The text content. + Text param.Field[string] `json:"text"` + ImageURL param.Field[interface{}] `json:"image_url,required"` +} + +func (r ChatCompletionContentPartParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r ChatCompletionContentPartParam) implementsChatCompletionContentPartUnionParam() {} + +// Satisfied by [ChatCompletionContentPartTextParam], +// [ChatCompletionContentPartImageParam], [ChatCompletionContentPartParam]. +type ChatCompletionContentPartUnionParam interface { + implementsChatCompletionContentPartUnionParam() +} + +// The type of the content part. 
+type ChatCompletionContentPartType string + +const ( + ChatCompletionContentPartTypeText ChatCompletionContentPartType = "text" + ChatCompletionContentPartTypeImageURL ChatCompletionContentPartType = "image_url" +) + +func (r ChatCompletionContentPartType) IsKnown() bool { + switch r { + case ChatCompletionContentPartTypeText, ChatCompletionContentPartTypeImageURL: + return true + } + return false +} + +type ChatCompletionContentPartImageParam struct { + ImageURL param.Field[ChatCompletionContentPartImageImageURLParam] `json:"image_url,required"` + // The type of the content part. + Type param.Field[ChatCompletionContentPartImageType] `json:"type,required"` +} + +func (r ChatCompletionContentPartImageParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r ChatCompletionContentPartImageParam) implementsChatCompletionContentPartUnionParam() {} + +type ChatCompletionContentPartImageImageURLParam struct { + // Either a URL of the image or the base64 encoded image data. + URL param.Field[string] `json:"url,required" format:"uri"` + // Specifies the detail level of the image. Learn more in the + // [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). + Detail param.Field[ChatCompletionContentPartImageImageURLDetail] `json:"detail"` +} + +func (r ChatCompletionContentPartImageImageURLParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// Specifies the detail level of the image. Learn more in the +// [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). 
+type ChatCompletionContentPartImageImageURLDetail string + +const ( + ChatCompletionContentPartImageImageURLDetailAuto ChatCompletionContentPartImageImageURLDetail = "auto" + ChatCompletionContentPartImageImageURLDetailLow ChatCompletionContentPartImageImageURLDetail = "low" + ChatCompletionContentPartImageImageURLDetailHigh ChatCompletionContentPartImageImageURLDetail = "high" +) + +func (r ChatCompletionContentPartImageImageURLDetail) IsKnown() bool { + switch r { + case ChatCompletionContentPartImageImageURLDetailAuto, ChatCompletionContentPartImageImageURLDetailLow, ChatCompletionContentPartImageImageURLDetailHigh: + return true + } + return false +} + +// The type of the content part. +type ChatCompletionContentPartImageType string + +const ( + ChatCompletionContentPartImageTypeImageURL ChatCompletionContentPartImageType = "image_url" +) + +func (r ChatCompletionContentPartImageType) IsKnown() bool { + switch r { + case ChatCompletionContentPartImageTypeImageURL: + return true + } + return false +} + +type ChatCompletionContentPartTextParam struct { + // The text content. + Text param.Field[string] `json:"text,required"` + // The type of the content part. + Type param.Field[ChatCompletionContentPartTextType] `json:"type,required"` +} + +func (r ChatCompletionContentPartTextParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r ChatCompletionContentPartTextParam) implementsChatCompletionContentPartUnionParam() {} + +// The type of the content part. +type ChatCompletionContentPartTextType string + +const ( + ChatCompletionContentPartTextTypeText ChatCompletionContentPartTextType = "text" +) + +func (r ChatCompletionContentPartTextType) IsKnown() bool { + switch r { + case ChatCompletionContentPartTextTypeText: + return true + } + return false +} + +// Specifying a particular function via `{"name": "my_function"}` forces the model +// to call that function. 
+type ChatCompletionFunctionCallOptionParam struct { + // The name of the function to call. + Name param.Field[string] `json:"name,required"` +} + +func (r ChatCompletionFunctionCallOptionParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r ChatCompletionFunctionCallOptionParam) implementsChatCompletionNewParamsFunctionCallUnion() {} + +type ChatCompletionFunctionMessageParam struct { + // The contents of the function message. + Content param.Field[string] `json:"content,required"` + // The name of the function to call. + Name param.Field[string] `json:"name,required"` + // The role of the messages author, in this case `function`. + Role param.Field[ChatCompletionFunctionMessageParamRole] `json:"role,required"` +} + +func (r ChatCompletionFunctionMessageParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r ChatCompletionFunctionMessageParam) implementsChatCompletionMessageParamUnion() {} + +// The role of the messages author, in this case `function`. +type ChatCompletionFunctionMessageParamRole string + +const ( + ChatCompletionFunctionMessageParamRoleFunction ChatCompletionFunctionMessageParamRole = "function" +) + +func (r ChatCompletionFunctionMessageParamRole) IsKnown() bool { + switch r { + case ChatCompletionFunctionMessageParamRoleFunction: + return true + } + return false +} + +// A chat completion message generated by the model. +type ChatCompletionMessage struct { + // The contents of the message. + Content string `json:"content,required,nullable"` + // The role of the author of this message. + Role ChatCompletionMessageRole `json:"role,required"` + // Deprecated and replaced by `tool_calls`. The name and arguments of a function + // that should be called, as generated by the model. + FunctionCall ChatCompletionMessageFunctionCall `json:"function_call"` + // The tool calls generated by the model, such as function calls. 
+ ToolCalls []ChatCompletionMessageToolCall `json:"tool_calls"` + JSON chatCompletionMessageJSON `json:"-"` +} + +// chatCompletionMessageJSON contains the JSON metadata for the struct +// [ChatCompletionMessage] +type chatCompletionMessageJSON struct { + Content apijson.Field + Role apijson.Field + FunctionCall apijson.Field + ToolCalls apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ChatCompletionMessage) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r chatCompletionMessageJSON) RawJSON() string { + return r.raw +} + +// The role of the author of this message. +type ChatCompletionMessageRole string + +const ( + ChatCompletionMessageRoleAssistant ChatCompletionMessageRole = "assistant" +) + +func (r ChatCompletionMessageRole) IsKnown() bool { + switch r { + case ChatCompletionMessageRoleAssistant: + return true + } + return false +} + +// Deprecated and replaced by `tool_calls`. The name and arguments of a function +// that should be called, as generated by the model. +type ChatCompletionMessageFunctionCall struct { + // The arguments to call the function with, as generated by the model in JSON + // format. Note that the model does not always generate valid JSON, and may + // hallucinate parameters not defined by your function schema. Validate the + // arguments in your code before calling your function. + Arguments string `json:"arguments,required"` + // The name of the function to call. 
+ Name string `json:"name,required"` + JSON chatCompletionMessageFunctionCallJSON `json:"-"` +} + +// chatCompletionMessageFunctionCallJSON contains the JSON metadata for the struct +// [ChatCompletionMessageFunctionCall] +type chatCompletionMessageFunctionCallJSON struct { + Arguments apijson.Field + Name apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ChatCompletionMessageFunctionCall) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r chatCompletionMessageFunctionCallJSON) RawJSON() string { + return r.raw +} + +type ChatCompletionMessageParam struct { + Content param.Field[interface{}] `json:"content,required"` + // The role of the messages author, in this case `system`. + Role param.Field[ChatCompletionMessageParamRole] `json:"role,required"` + // An optional name for the participant. Provides the model information to + // differentiate between participants of the same role. + Name param.Field[string] `json:"name"` + ToolCalls param.Field[interface{}] `json:"tool_calls,required"` + FunctionCall param.Field[interface{}] `json:"function_call,required"` + // Tool call that this message is responding to. + ToolCallID param.Field[string] `json:"tool_call_id"` +} + +func (r ChatCompletionMessageParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r ChatCompletionMessageParam) implementsChatCompletionMessageParamUnion() {} + +// Satisfied by [ChatCompletionSystemMessageParam], +// [ChatCompletionUserMessageParam], [ChatCompletionAssistantMessageParam], +// [ChatCompletionToolMessageParam], [ChatCompletionFunctionMessageParam], +// [ChatCompletionMessageParam]. +type ChatCompletionMessageParamUnion interface { + implementsChatCompletionMessageParamUnion() +} + +// The role of the messages author, in this case `system`. 
+type ChatCompletionMessageParamRole string + +const ( + ChatCompletionMessageParamRoleSystem ChatCompletionMessageParamRole = "system" + ChatCompletionMessageParamRoleUser ChatCompletionMessageParamRole = "user" + ChatCompletionMessageParamRoleAssistant ChatCompletionMessageParamRole = "assistant" + ChatCompletionMessageParamRoleTool ChatCompletionMessageParamRole = "tool" + ChatCompletionMessageParamRoleFunction ChatCompletionMessageParamRole = "function" +) + +func (r ChatCompletionMessageParamRole) IsKnown() bool { + switch r { + case ChatCompletionMessageParamRoleSystem, ChatCompletionMessageParamRoleUser, ChatCompletionMessageParamRoleAssistant, ChatCompletionMessageParamRoleTool, ChatCompletionMessageParamRoleFunction: + return true + } + return false +} + +type ChatCompletionMessageToolCall struct { + // The ID of the tool call. + ID string `json:"id,required"` + // The function that the model called. + Function ChatCompletionMessageToolCallFunction `json:"function,required"` + // The type of the tool. Currently, only `function` is supported. + Type ChatCompletionMessageToolCallType `json:"type,required"` + JSON chatCompletionMessageToolCallJSON `json:"-"` +} + +// chatCompletionMessageToolCallJSON contains the JSON metadata for the struct +// [ChatCompletionMessageToolCall] +type chatCompletionMessageToolCallJSON struct { + ID apijson.Field + Function apijson.Field + Type apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ChatCompletionMessageToolCall) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r chatCompletionMessageToolCallJSON) RawJSON() string { + return r.raw +} + +// The function that the model called. +type ChatCompletionMessageToolCallFunction struct { + // The arguments to call the function with, as generated by the model in JSON + // format. 
Note that the model does not always generate valid JSON, and may + // hallucinate parameters not defined by your function schema. Validate the + // arguments in your code before calling your function. + Arguments string `json:"arguments,required"` + // The name of the function to call. + Name string `json:"name,required"` + JSON chatCompletionMessageToolCallFunctionJSON `json:"-"` +} + +// chatCompletionMessageToolCallFunctionJSON contains the JSON metadata for the +// struct [ChatCompletionMessageToolCallFunction] +type chatCompletionMessageToolCallFunctionJSON struct { + Arguments apijson.Field + Name apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ChatCompletionMessageToolCallFunction) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r chatCompletionMessageToolCallFunctionJSON) RawJSON() string { + return r.raw +} + +// The type of the tool. Currently, only `function` is supported. +type ChatCompletionMessageToolCallType string + +const ( + ChatCompletionMessageToolCallTypeFunction ChatCompletionMessageToolCallType = "function" +) + +func (r ChatCompletionMessageToolCallType) IsKnown() bool { + switch r { + case ChatCompletionMessageToolCallTypeFunction: + return true + } + return false +} + +type ChatCompletionMessageToolCallParam struct { + // The ID of the tool call. + ID param.Field[string] `json:"id,required"` + // The function that the model called. + Function param.Field[ChatCompletionMessageToolCallFunctionParam] `json:"function,required"` + // The type of the tool. Currently, only `function` is supported. + Type param.Field[ChatCompletionMessageToolCallType] `json:"type,required"` +} + +func (r ChatCompletionMessageToolCallParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// The function that the model called. 
+type ChatCompletionMessageToolCallFunctionParam struct { + // The arguments to call the function with, as generated by the model in JSON + // format. Note that the model does not always generate valid JSON, and may + // hallucinate parameters not defined by your function schema. Validate the + // arguments in your code before calling your function. + Arguments param.Field[string] `json:"arguments,required"` + // The name of the function to call. + Name param.Field[string] `json:"name,required"` +} + +func (r ChatCompletionMessageToolCallFunctionParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// Specifies a tool the model should use. Use to force the model to call a specific +// function. +type ChatCompletionNamedToolChoiceParam struct { + Function param.Field[ChatCompletionNamedToolChoiceFunctionParam] `json:"function,required"` + // The type of the tool. Currently, only `function` is supported. + Type param.Field[ChatCompletionNamedToolChoiceType] `json:"type,required"` +} + +func (r ChatCompletionNamedToolChoiceParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r ChatCompletionNamedToolChoiceParam) implementsChatCompletionToolChoiceOptionUnionParam() {} + +type ChatCompletionNamedToolChoiceFunctionParam struct { + // The name of the function to call. + Name param.Field[string] `json:"name,required"` +} + +func (r ChatCompletionNamedToolChoiceFunctionParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// The type of the tool. Currently, only `function` is supported. +type ChatCompletionNamedToolChoiceType string + +const ( + ChatCompletionNamedToolChoiceTypeFunction ChatCompletionNamedToolChoiceType = "function" +) + +func (r ChatCompletionNamedToolChoiceType) IsKnown() bool { + switch r { + case ChatCompletionNamedToolChoiceTypeFunction: + return true + } + return false +} + +// Options for streaming response. 
Only set this when you set `stream: true`. +type ChatCompletionStreamOptionsParam struct { + // If set, an additional chunk will be streamed before the `data: [DONE]` message. + // The `usage` field on this chunk shows the token usage statistics for the entire + // request, and the `choices` field will always be an empty array. All other chunks + // will also include a `usage` field, but with a null value. + IncludeUsage param.Field[bool] `json:"include_usage"` +} + +func (r ChatCompletionStreamOptionsParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type ChatCompletionSystemMessageParam struct { + // The contents of the system message. + Content param.Field[string] `json:"content,required"` + // The role of the messages author, in this case `system`. + Role param.Field[ChatCompletionSystemMessageParamRole] `json:"role,required"` + // An optional name for the participant. Provides the model information to + // differentiate between participants of the same role. + Name param.Field[string] `json:"name"` +} + +func (r ChatCompletionSystemMessageParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r ChatCompletionSystemMessageParam) implementsChatCompletionMessageParamUnion() {} + +// The role of the messages author, in this case `system`. +type ChatCompletionSystemMessageParamRole string + +const ( + ChatCompletionSystemMessageParamRoleSystem ChatCompletionSystemMessageParamRole = "system" +) + +func (r ChatCompletionSystemMessageParamRole) IsKnown() bool { + switch r { + case ChatCompletionSystemMessageParamRoleSystem: + return true + } + return false +} + +type ChatCompletionTokenLogprob struct { + // The token. + Token string `json:"token,required"` + // A list of integers representing the UTF-8 bytes representation of the token. 
+ // Useful in instances where characters are represented by multiple tokens and + // their byte representations must be combined to generate the correct text + // representation. Can be `null` if there is no bytes representation for the token. + Bytes []int64 `json:"bytes,required,nullable"` + // The log probability of this token, if it is within the top 20 most likely + // tokens. Otherwise, the value `-9999.0` is used to signify that the token is very + // unlikely. + Logprob float64 `json:"logprob,required"` + // List of the most likely tokens and their log probability, at this token + // position. In rare cases, there may be fewer than the number of requested + // `top_logprobs` returned. + TopLogprobs []ChatCompletionTokenLogprobTopLogprob `json:"top_logprobs,required"` + JSON chatCompletionTokenLogprobJSON `json:"-"` +} + +// chatCompletionTokenLogprobJSON contains the JSON metadata for the struct +// [ChatCompletionTokenLogprob] +type chatCompletionTokenLogprobJSON struct { + Token apijson.Field + Bytes apijson.Field + Logprob apijson.Field + TopLogprobs apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ChatCompletionTokenLogprob) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r chatCompletionTokenLogprobJSON) RawJSON() string { + return r.raw +} + +type ChatCompletionTokenLogprobTopLogprob struct { + // The token. + Token string `json:"token,required"` + // A list of integers representing the UTF-8 bytes representation of the token. + // Useful in instances where characters are represented by multiple tokens and + // their byte representations must be combined to generate the correct text + // representation. Can be `null` if there is no bytes representation for the token. + Bytes []int64 `json:"bytes,required,nullable"` + // The log probability of this token, if it is within the top 20 most likely + // tokens. 
Otherwise, the value `-9999.0` is used to signify that the token is very + // unlikely. + Logprob float64 `json:"logprob,required"` + JSON chatCompletionTokenLogprobTopLogprobJSON `json:"-"` +} + +// chatCompletionTokenLogprobTopLogprobJSON contains the JSON metadata for the +// struct [ChatCompletionTokenLogprobTopLogprob] +type chatCompletionTokenLogprobTopLogprobJSON struct { + Token apijson.Field + Bytes apijson.Field + Logprob apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ChatCompletionTokenLogprobTopLogprob) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r chatCompletionTokenLogprobTopLogprobJSON) RawJSON() string { + return r.raw +} + +type ChatCompletionToolParam struct { + Function param.Field[shared.FunctionDefinitionParam] `json:"function,required"` + // The type of the tool. Currently, only `function` is supported. + Type param.Field[ChatCompletionToolType] `json:"type,required"` +} + +func (r ChatCompletionToolParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// The type of the tool. Currently, only `function` is supported. +type ChatCompletionToolType string + +const ( + ChatCompletionToolTypeFunction ChatCompletionToolType = "function" +) + +func (r ChatCompletionToolType) IsKnown() bool { + switch r { + case ChatCompletionToolTypeFunction: + return true + } + return false +} + +// Controls which (if any) tool is called by the model. `none` means the model will +// not call any tool and instead generates a message. `auto` means the model can +// pick between generating a message or calling one or more tools. `required` means +// the model must call one or more tools. Specifying a particular tool via +// `{"type": "function", "function": {"name": "my_function"}}` forces the model to +// call that tool. +// +// `none` is the default when no tools are present. `auto` is the default if tools +// are present. 
+// +// Satisfied by [ChatCompletionToolChoiceOptionString], +// [ChatCompletionNamedToolChoiceParam]. +type ChatCompletionToolChoiceOptionUnionParam interface { + implementsChatCompletionToolChoiceOptionUnionParam() +} + +// `none` means the model will not call any tool and instead generates a message. +// `auto` means the model can pick between generating a message or calling one or +// more tools. `required` means the model must call one or more tools. +type ChatCompletionToolChoiceOptionString string + +const ( + ChatCompletionToolChoiceOptionStringNone ChatCompletionToolChoiceOptionString = "none" + ChatCompletionToolChoiceOptionStringAuto ChatCompletionToolChoiceOptionString = "auto" + ChatCompletionToolChoiceOptionStringRequired ChatCompletionToolChoiceOptionString = "required" +) + +func (r ChatCompletionToolChoiceOptionString) IsKnown() bool { + switch r { + case ChatCompletionToolChoiceOptionStringNone, ChatCompletionToolChoiceOptionStringAuto, ChatCompletionToolChoiceOptionStringRequired: + return true + } + return false +} + +func (r ChatCompletionToolChoiceOptionString) implementsChatCompletionToolChoiceOptionUnionParam() {} + +type ChatCompletionToolMessageParam struct { + // The contents of the tool message. + Content param.Field[string] `json:"content,required"` + // The role of the messages author, in this case `tool`. + Role param.Field[ChatCompletionToolMessageParamRole] `json:"role,required"` + // Tool call that this message is responding to. + ToolCallID param.Field[string] `json:"tool_call_id,required"` +} + +func (r ChatCompletionToolMessageParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r ChatCompletionToolMessageParam) implementsChatCompletionMessageParamUnion() {} + +// The role of the messages author, in this case `tool`. 
+type ChatCompletionToolMessageParamRole string + +const ( + ChatCompletionToolMessageParamRoleTool ChatCompletionToolMessageParamRole = "tool" +) + +func (r ChatCompletionToolMessageParamRole) IsKnown() bool { + switch r { + case ChatCompletionToolMessageParamRoleTool: + return true + } + return false +} + +type ChatCompletionUserMessageParam struct { + // The contents of the user message. + Content param.Field[ChatCompletionUserMessageParamContentUnion] `json:"content,required"` + // The role of the messages author, in this case `user`. + Role param.Field[ChatCompletionUserMessageParamRole] `json:"role,required"` + // An optional name for the participant. Provides the model information to + // differentiate between participants of the same role. + Name param.Field[string] `json:"name"` +} + +func (r ChatCompletionUserMessageParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r ChatCompletionUserMessageParam) implementsChatCompletionMessageParamUnion() {} + +// The contents of the user message. +// +// Satisfied by [shared.UnionString], +// [ChatCompletionUserMessageParamContentArrayOfContentParts]. +type ChatCompletionUserMessageParamContentUnion interface { + ImplementsChatCompletionUserMessageParamContentUnion() +} + +type ChatCompletionUserMessageParamContentArrayOfContentParts []ChatCompletionContentPartUnionParam + +func (r ChatCompletionUserMessageParamContentArrayOfContentParts) ImplementsChatCompletionUserMessageParamContentUnion() { +} + +// The role of the messages author, in this case `user`. +type ChatCompletionUserMessageParamRole string + +const ( + ChatCompletionUserMessageParamRoleUser ChatCompletionUserMessageParamRole = "user" +) + +func (r ChatCompletionUserMessageParamRole) IsKnown() bool { + switch r { + case ChatCompletionUserMessageParamRoleUser: + return true + } + return false +} + +type ChatCompletionNewParams struct { + // A list of messages comprising the conversation so far. 
+ // [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + Messages param.Field[[]ChatCompletionMessageParamUnion] `json:"messages,required"` + // ID of the model to use. See the + // [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + // table for details on which models work with the Chat API. + Model param.Field[ChatModel] `json:"model,required"` + // Number between -2.0 and 2.0. Positive values penalize new tokens based on their + // existing frequency in the text so far, decreasing the model's likelihood to + // repeat the same line verbatim. + // + // [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + FrequencyPenalty param.Field[float64] `json:"frequency_penalty"` + // Deprecated in favor of `tool_choice`. + // + // Controls which (if any) function is called by the model. `none` means the model + // will not call a function and instead generates a message. `auto` means the model + // can pick between generating a message or calling a function. Specifying a + // particular function via `{"name": "my_function"}` forces the model to call that + // function. + // + // `none` is the default when no functions are present. `auto` is the default if + // functions are present. + FunctionCall param.Field[ChatCompletionNewParamsFunctionCallUnion] `json:"function_call"` + // Deprecated in favor of `tools`. + // + // A list of functions the model may generate JSON inputs for. + Functions param.Field[[]ChatCompletionNewParamsFunction] `json:"functions"` + // Modify the likelihood of specified tokens appearing in the completion. + // + // Accepts a JSON object that maps tokens (specified by their token ID in the + // tokenizer) to an associated bias value from -100 to 100. Mathematically, the + // bias is added to the logits generated by the model prior to sampling. 
The exact + // effect will vary per model, but values between -1 and 1 should decrease or + // increase likelihood of selection; values like -100 or 100 should result in a ban + // or exclusive selection of the relevant token. + LogitBias param.Field[map[string]int64] `json:"logit_bias"` + // Whether to return log probabilities of the output tokens or not. If true, + // returns the log probabilities of each output token returned in the `content` of + // `message`. + Logprobs param.Field[bool] `json:"logprobs"` + // The maximum number of [tokens](/tokenizer) that can be generated in the chat + // completion. + // + // The total length of input tokens and generated tokens is limited by the model's + // context length. + // [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + // for counting tokens. + MaxTokens param.Field[int64] `json:"max_tokens"` + // How many chat completion choices to generate for each input message. Note that + // you will be charged based on the number of generated tokens across all of the + // choices. Keep `n` as `1` to minimize costs. + N param.Field[int64] `json:"n"` + // Whether to enable + // [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + // during tool use. + ParallelToolCalls param.Field[bool] `json:"parallel_tool_calls"` + // Number between -2.0 and 2.0. Positive values penalize new tokens based on + // whether they appear in the text so far, increasing the model's likelihood to + // talk about new topics. + // + // [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + PresencePenalty param.Field[float64] `json:"presence_penalty"` + // An object specifying the format that the model must output. 
Compatible with
+	// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+	// all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+	//
+	// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+	// message the model generates is valid JSON.
+	//
+	// **Important:** when using JSON mode, you **must** also instruct the model to
+	// produce JSON yourself via a system or user message. Without this, the model may
+	// generate an unending stream of whitespace until the generation reaches the token
+	// limit, resulting in a long-running and seemingly "stuck" request. Also note that
+	// the message content may be partially cut off if `finish_reason="length"`, which
+	// indicates the generation exceeded `max_tokens` or the conversation exceeded the
+	// max context length.
+	ResponseFormat param.Field[ChatCompletionNewParamsResponseFormat] `json:"response_format"`
+	// This feature is in Beta. If specified, our system will make a best effort to
+	// sample deterministically, such that repeated requests with the same `seed` and
+	// parameters should return the same result. Determinism is not guaranteed, and you
+	// should refer to the `system_fingerprint` response parameter to monitor changes
+	// in the backend.
+	Seed param.Field[int64] `json:"seed"`
+	// Specifies the latency tier to use for processing the request. This parameter is
+	// relevant for customers subscribed to the scale tier service:
+	//
+	//   - If set to 'auto', the system will utilize scale tier credits until they are
+	//     exhausted.
+	//   - If set to 'default', the request will be processed using the default service
+	//     tier with a lower uptime SLA and no latency guarantee.
+	//   - When not set, the default behavior is 'auto'.
+	//
+	// When this parameter is set, the response body will include the `service_tier`
+	// utilized.
+ ServiceTier param.Field[ChatCompletionNewParamsServiceTier] `json:"service_tier"` + // Up to 4 sequences where the API will stop generating further tokens. + Stop param.Field[ChatCompletionNewParamsStopUnion] `json:"stop"` + // Options for streaming response. Only set this when you set `stream: true`. + StreamOptions param.Field[ChatCompletionStreamOptionsParam] `json:"stream_options"` + // What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + // make the output more random, while lower values like 0.2 will make it more + // focused and deterministic. + // + // We generally recommend altering this or `top_p` but not both. + Temperature param.Field[float64] `json:"temperature"` + // Controls which (if any) tool is called by the model. `none` means the model will + // not call any tool and instead generates a message. `auto` means the model can + // pick between generating a message or calling one or more tools. `required` means + // the model must call one or more tools. Specifying a particular tool via + // `{"type": "function", "function": {"name": "my_function"}}` forces the model to + // call that tool. + // + // `none` is the default when no tools are present. `auto` is the default if tools + // are present. + ToolChoice param.Field[ChatCompletionToolChoiceOptionUnionParam] `json:"tool_choice"` + // A list of tools the model may call. Currently, only functions are supported as a + // tool. Use this to provide a list of functions the model may generate JSON inputs + // for. A max of 128 functions are supported. + Tools param.Field[[]ChatCompletionToolParam] `json:"tools"` + // An integer between 0 and 20 specifying the number of most likely tokens to + // return at each token position, each with an associated log probability. + // `logprobs` must be set to `true` if this parameter is used. 
+ TopLogprobs param.Field[int64] `json:"top_logprobs"` + // An alternative to sampling with temperature, called nucleus sampling, where the + // model considers the results of the tokens with top_p probability mass. So 0.1 + // means only the tokens comprising the top 10% probability mass are considered. + // + // We generally recommend altering this or `temperature` but not both. + TopP param.Field[float64] `json:"top_p"` + // A unique identifier representing your end-user, which can help OpenAI to monitor + // and detect abuse. + // [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + User param.Field[string] `json:"user"` +} + +func (r ChatCompletionNewParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// Deprecated in favor of `tool_choice`. +// +// Controls which (if any) function is called by the model. `none` means the model +// will not call a function and instead generates a message. `auto` means the model +// can pick between generating a message or calling a function. Specifying a +// particular function via `{"name": "my_function"}` forces the model to call that +// function. +// +// `none` is the default when no functions are present. `auto` is the default if +// functions are present. +// +// Satisfied by [ChatCompletionNewParamsFunctionCallString], +// [ChatCompletionFunctionCallOptionParam]. +type ChatCompletionNewParamsFunctionCallUnion interface { + implementsChatCompletionNewParamsFunctionCallUnion() +} + +// `none` means the model will not call a function and instead generates a message. +// `auto` means the model can pick between generating a message or calling a +// function. 
+type ChatCompletionNewParamsFunctionCallString string + +const ( + ChatCompletionNewParamsFunctionCallStringNone ChatCompletionNewParamsFunctionCallString = "none" + ChatCompletionNewParamsFunctionCallStringAuto ChatCompletionNewParamsFunctionCallString = "auto" +) + +func (r ChatCompletionNewParamsFunctionCallString) IsKnown() bool { + switch r { + case ChatCompletionNewParamsFunctionCallStringNone, ChatCompletionNewParamsFunctionCallStringAuto: + return true + } + return false +} + +func (r ChatCompletionNewParamsFunctionCallString) implementsChatCompletionNewParamsFunctionCallUnion() { +} + +type ChatCompletionNewParamsFunction struct { + // The name of the function to be called. Must be a-z, A-Z, 0-9, or contain + // underscores and dashes, with a maximum length of 64. + Name param.Field[string] `json:"name,required"` + // A description of what the function does, used by the model to choose when and + // how to call the function. + Description param.Field[string] `json:"description"` + // The parameters the functions accepts, described as a JSON Schema object. See the + // [guide](https://platform.openai.com/docs/guides/function-calling) for examples, + // and the + // [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + // documentation about the format. + // + // Omitting `parameters` defines a function with an empty parameter list. + Parameters param.Field[shared.FunctionParameters] `json:"parameters"` +} + +func (r ChatCompletionNewParamsFunction) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// An object specifying the format that the model must output. Compatible with +// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and +// all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. +// +// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the +// message the model generates is valid JSON. 
+//
+// **Important:** when using JSON mode, you **must** also instruct the model to
+// produce JSON yourself via a system or user message. Without this, the model may
+// generate an unending stream of whitespace until the generation reaches the token
+// limit, resulting in a long-running and seemingly "stuck" request. Also note that
+// the message content may be partially cut off if `finish_reason="length"`, which
+// indicates the generation exceeded `max_tokens` or the conversation exceeded the
+// max context length.
+type ChatCompletionNewParamsResponseFormat struct {
+	// Must be one of `text` or `json_object`.
+	Type param.Field[ChatCompletionNewParamsResponseFormatType] `json:"type"`
+}
+
+func (r ChatCompletionNewParamsResponseFormat) MarshalJSON() (data []byte, err error) {
+	return apijson.MarshalRoot(r)
+}
+
+// Must be one of `text` or `json_object`.
+type ChatCompletionNewParamsResponseFormatType string
+
+const (
+	ChatCompletionNewParamsResponseFormatTypeText       ChatCompletionNewParamsResponseFormatType = "text"
+	ChatCompletionNewParamsResponseFormatTypeJSONObject ChatCompletionNewParamsResponseFormatType = "json_object"
+)
+
+// IsKnown reports whether r is one of the enum members declared above; an
+// unrecognized value from a newer API version returns false.
+func (r ChatCompletionNewParamsResponseFormatType) IsKnown() bool {
+	switch r {
+	case ChatCompletionNewParamsResponseFormatTypeText, ChatCompletionNewParamsResponseFormatTypeJSONObject:
+		return true
+	}
+	return false
+}
+
+// Specifies the latency tier to use for processing the request. This parameter is
+// relevant for customers subscribed to the scale tier service:
+//
+//   - If set to 'auto', the system will utilize scale tier credits until they are
+//     exhausted.
+//   - If set to 'default', the request will be processed using the default service
+//     tier with a lower uptime SLA and no latency guarantee.
+//   - When not set, the default behavior is 'auto'.
+//
+// When this parameter is set, the response body will include the `service_tier`
+// utilized.
+type ChatCompletionNewParamsServiceTier string + +const ( + ChatCompletionNewParamsServiceTierAuto ChatCompletionNewParamsServiceTier = "auto" + ChatCompletionNewParamsServiceTierDefault ChatCompletionNewParamsServiceTier = "default" +) + +func (r ChatCompletionNewParamsServiceTier) IsKnown() bool { + switch r { + case ChatCompletionNewParamsServiceTierAuto, ChatCompletionNewParamsServiceTierDefault: + return true + } + return false +} + +// Up to 4 sequences where the API will stop generating further tokens. +// +// Satisfied by [shared.UnionString], [ChatCompletionNewParamsStopArray]. +type ChatCompletionNewParamsStopUnion interface { + ImplementsChatCompletionNewParamsStopUnion() +} + +type ChatCompletionNewParamsStopArray []string + +func (r ChatCompletionNewParamsStopArray) ImplementsChatCompletionNewParamsStopUnion() {} diff --git a/chatcompletion_test.go b/chatcompletion_test.go new file mode 100644 index 0000000..e93623d --- /dev/null +++ b/chatcompletion_test.go @@ -0,0 +1,103 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package openai_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" + "github.com/openai/openai-go/shared" +) + +func TestChatCompletionNewWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Chat.Completions.New(context.TODO(), openai.ChatCompletionNewParams{ + Messages: openai.F([]openai.ChatCompletionMessageParamUnion{openai.ChatCompletionSystemMessageParam{ + Content: openai.F("content"), + Role: openai.F(openai.ChatCompletionSystemMessageParamRoleSystem), + Name: openai.F("name"), + }}), + Model: openai.F(openai.ChatModelGPT4o), + FrequencyPenalty: openai.F(-2.000000), + FunctionCall: openai.F[openai.ChatCompletionNewParamsFunctionCallUnion](openai.ChatCompletionNewParamsFunctionCallString(openai.ChatCompletionNewParamsFunctionCallStringNone)), + Functions: openai.F([]openai.ChatCompletionNewParamsFunction{{ + Description: openai.F("description"), + Name: openai.F("name"), + Parameters: openai.F(shared.FunctionParameters{ + "foo": "bar", + }), + }}), + LogitBias: openai.F(map[string]int64{ + "foo": int64(0), + }), + Logprobs: openai.F(true), + MaxTokens: openai.F(int64(0)), + N: openai.F(int64(1)), + ParallelToolCalls: openai.F(true), + PresencePenalty: openai.F(-2.000000), + ResponseFormat: openai.F(openai.ChatCompletionNewParamsResponseFormat{ + Type: openai.F(openai.ChatCompletionNewParamsResponseFormatTypeJSONObject), + }), + Seed: openai.F(int64(-9007199254740991)), + ServiceTier: openai.F(openai.ChatCompletionNewParamsServiceTierAuto), + Stop: openai.F[openai.ChatCompletionNewParamsStopUnion](shared.UnionString("string")), + StreamOptions: 
openai.F(openai.ChatCompletionStreamOptionsParam{ + IncludeUsage: openai.F(true), + }), + Temperature: openai.F(1.000000), + ToolChoice: openai.F[openai.ChatCompletionToolChoiceOptionUnionParam](openai.ChatCompletionToolChoiceOptionString(openai.ChatCompletionToolChoiceOptionStringNone)), + Tools: openai.F([]openai.ChatCompletionToolParam{{ + Type: openai.F(openai.ChatCompletionToolTypeFunction), + Function: openai.F(shared.FunctionDefinitionParam{ + Description: openai.F("description"), + Name: openai.F("name"), + Parameters: openai.F(shared.FunctionParameters{ + "foo": "bar", + }), + }), + }, { + Type: openai.F(openai.ChatCompletionToolTypeFunction), + Function: openai.F(shared.FunctionDefinitionParam{ + Description: openai.F("description"), + Name: openai.F("name"), + Parameters: openai.F(shared.FunctionParameters{ + "foo": "bar", + }), + }), + }, { + Type: openai.F(openai.ChatCompletionToolTypeFunction), + Function: openai.F(shared.FunctionDefinitionParam{ + Description: openai.F("description"), + Name: openai.F("name"), + Parameters: openai.F(shared.FunctionParameters{ + "foo": "bar", + }), + }), + }}), + TopLogprobs: openai.F(int64(0)), + TopP: openai.F(1.000000), + User: openai.F("user-1234"), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/client.go b/client.go new file mode 100644 index 0000000..e62273d --- /dev/null +++ b/client.go @@ -0,0 +1,135 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai + +import ( + "context" + "net/http" + "os" + + "github.com/openai/openai-go/internal/requestconfig" + "github.com/openai/openai-go/option" +) + +// Client creates a struct with services and top level methods that help with +// interacting with the openai API. You should not instantiate this client +// directly, and instead use the [NewClient] method instead. 
+type Client struct {
+	Options     []option.RequestOption
+	Completions *CompletionService
+	Chat        *ChatService
+	Embeddings  *EmbeddingService
+	Files       *FileService
+	Images      *ImageService
+	Audio       *AudioService
+	Moderations *ModerationService
+	Models      *ModelService
+	FineTuning  *FineTuningService
+	Beta        *BetaService
+	Batches     *BatchService
+	Uploads     *UploadService
+}
+
+// NewClient generates a new client with default options read from the
+// environment (OPENAI_API_KEY, OPENAI_ORG_ID, OPENAI_PROJECT_ID). The options
+// passed in as arguments are applied after these default options, and all
+// options will be passed down to the services and requests that this client
+// makes.
+func NewClient(opts ...option.RequestOption) (r *Client) {
+	defaults := []option.RequestOption{option.WithEnvironmentProduction()}
+	if o, ok := os.LookupEnv("OPENAI_API_KEY"); ok {
+		defaults = append(defaults, option.WithAPIKey(o))
+	}
+	if o, ok := os.LookupEnv("OPENAI_ORG_ID"); ok {
+		defaults = append(defaults, option.WithOrganization(o))
+	}
+	if o, ok := os.LookupEnv("OPENAI_PROJECT_ID"); ok {
+		defaults = append(defaults, option.WithProject(o))
+	}
+	// Environment-derived defaults go first so that explicitly passed options
+	// can override them.
+	opts = append(defaults, opts...)
+
+	r = &Client{Options: opts}
+
+	r.Completions = NewCompletionService(opts...)
+	r.Chat = NewChatService(opts...)
+	r.Embeddings = NewEmbeddingService(opts...)
+	r.Files = NewFileService(opts...)
+	r.Images = NewImageService(opts...)
+	r.Audio = NewAudioService(opts...)
+	r.Moderations = NewModerationService(opts...)
+	r.Models = NewModelService(opts...)
+	r.FineTuning = NewFineTuningService(opts...)
+	r.Beta = NewBetaService(opts...)
+	r.Batches = NewBatchService(opts...)
+	r.Uploads = NewUploadService(opts...)
+
+	return
+}
+
+// Execute makes a request with the given context, method, URL, request params,
+// response, and request options. This is useful for hitting undocumented endpoints
+// while retaining the base URL, auth, retries, and other options from the client.
+//
+// If a byte slice or an [io.Reader] is supplied to params, it will be used as-is
+// for the request body.
+//
+// The params is by default serialized into the body using [encoding/json]. If your
+// type implements a MarshalJSON function, it will be used instead to serialize the
+// request. If a URLQuery method is implemented, the returned [url.Values] will be
+// used as query strings to the url.
+//
+// If your params struct uses [param.Field], you must provide either [MarshalJSON],
+// [URLQuery], and/or [MarshalForm] functions. It is undefined behavior to use a
+// struct that uses [param.Field] without specifying how it is serialized.
+//
+// Any "…Params" object defined in this library can be used as the request
+// argument. Note that 'path' arguments will not be forwarded into the url.
+//
+// The response body will be deserialized into the res variable, depending on its
+// type:
+//
+//   - A pointer to a [*http.Response] is populated by the raw response.
+//   - A pointer to a byte array will be populated with the contents of the request
+//     body.
+//   - A pointer to any other type uses this library's default JSON decoding, which
+//     respects UnmarshalJSON if it is defined on the type.
+//   - A nil value will not read the response body.
+//
+// For even greater flexibility, see [option.WithResponseInto] and
+// [option.WithResponseBodyInto].
+func (r *Client) Execute(ctx context.Context, method string, path string, params interface{}, res interface{}, opts ...option.RequestOption) error {
+	// Client-level options come first so per-call options can override them.
+	opts = append(r.Options, opts...)
+	return requestconfig.ExecuteNewRequest(ctx, method, path, params, res, opts...)
+}
+
+// Get makes a GET request with the given URL, params, and optionally deserializes
+// to a response. See [Execute] documentation on the params and response.
+func (r *Client) Get(ctx context.Context, path string, params interface{}, res interface{}, opts ...option.RequestOption) error { + return r.Execute(ctx, http.MethodGet, path, params, res, opts...) +} + +// Post makes a POST request with the given URL, params, and optionally +// deserializes to a response. See [Execute] documentation on the params and +// response. +func (r *Client) Post(ctx context.Context, path string, params interface{}, res interface{}, opts ...option.RequestOption) error { + return r.Execute(ctx, http.MethodPost, path, params, res, opts...) +} + +// Put makes a PUT request with the given URL, params, and optionally deserializes +// to a response. See [Execute] documentation on the params and response. +func (r *Client) Put(ctx context.Context, path string, params interface{}, res interface{}, opts ...option.RequestOption) error { + return r.Execute(ctx, http.MethodPut, path, params, res, opts...) +} + +// Patch makes a PATCH request with the given URL, params, and optionally +// deserializes to a response. See [Execute] documentation on the params and +// response. +func (r *Client) Patch(ctx context.Context, path string, params interface{}, res interface{}, opts ...option.RequestOption) error { + return r.Execute(ctx, http.MethodPatch, path, params, res, opts...) +} + +// Delete makes a DELETE request with the given URL, params, and optionally +// deserializes to a response. See [Execute] documentation on the params and +// response. +func (r *Client) Delete(ctx context.Context, path string, params interface{}, res interface{}, opts ...option.RequestOption) error { + return r.Execute(ctx, http.MethodDelete, path, params, res, opts...) +} diff --git a/client_test.go b/client_test.go new file mode 100644 index 0000000..2027fab --- /dev/null +++ b/client_test.go @@ -0,0 +1,206 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package openai_test + +import ( + "context" + "fmt" + "net/http" + "testing" + "time" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal" + "github.com/openai/openai-go/option" + "github.com/openai/openai-go/shared" +) + +type closureTransport struct { + fn func(req *http.Request) (*http.Response, error) +} + +func (t *closureTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return t.fn(req) +} + +func TestUserAgentHeader(t *testing.T) { + var userAgent string + client := openai.NewClient( + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + userAgent = req.Header.Get("User-Agent") + return &http.Response{ + StatusCode: http.StatusOK, + }, nil + }, + }, + }), + ) + client.Chat.Completions.New(context.Background(), openai.ChatCompletionNewParams{ + Messages: openai.F([]openai.ChatCompletionMessageParamUnion{openai.ChatCompletionUserMessageParam{ + Role: openai.F(openai.ChatCompletionUserMessageParamRoleUser), + Content: openai.F[openai.ChatCompletionUserMessageParamContentUnion](shared.UnionString("Say this is a test")), + }}), + Model: openai.F(openai.ChatModelGPT4o), + }) + if userAgent != fmt.Sprintf("OpenAI/Go %s", internal.PackageVersion) { + t.Errorf("Expected User-Agent to be correct, but got: %#v", userAgent) + } +} + +func TestRetryAfter(t *testing.T) { + attempts := 0 + client := openai.NewClient( + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + attempts++ + return &http.Response{ + StatusCode: http.StatusTooManyRequests, + Header: http.Header{ + http.CanonicalHeaderKey("Retry-After"): []string{"0.1"}, + }, + }, nil + }, + }, + }), + ) + res, err := client.Chat.Completions.New(context.Background(), openai.ChatCompletionNewParams{ + Messages: openai.F([]openai.ChatCompletionMessageParamUnion{openai.ChatCompletionUserMessageParam{ + Role: 
openai.F(openai.ChatCompletionUserMessageParamRoleUser), + Content: openai.F[openai.ChatCompletionUserMessageParamContentUnion](shared.UnionString("Say this is a test")), + }}), + Model: openai.F(openai.ChatModelGPT4o), + }) + if err == nil || res != nil { + t.Error("Expected there to be a cancel error and for the response to be nil") + } + if want := 3; attempts != want { + t.Errorf("Expected %d attempts, got %d", want, attempts) + } +} + +func TestRetryAfterMs(t *testing.T) { + attempts := 0 + client := openai.NewClient( + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + attempts++ + return &http.Response{ + StatusCode: http.StatusTooManyRequests, + Header: http.Header{ + http.CanonicalHeaderKey("Retry-After-Ms"): []string{"100"}, + }, + }, nil + }, + }, + }), + ) + res, err := client.Chat.Completions.New(context.Background(), openai.ChatCompletionNewParams{ + Messages: openai.F([]openai.ChatCompletionMessageParamUnion{openai.ChatCompletionUserMessageParam{ + Role: openai.F(openai.ChatCompletionUserMessageParamRoleUser), + Content: openai.F[openai.ChatCompletionUserMessageParamContentUnion](shared.UnionString("Say this is a test")), + }}), + Model: openai.F(openai.ChatModelGPT4o), + }) + if err == nil || res != nil { + t.Error("Expected there to be a cancel error and for the response to be nil") + } + if want := 3; attempts != want { + t.Errorf("Expected %d attempts, got %d", want, attempts) + } +} + +func TestContextCancel(t *testing.T) { + client := openai.NewClient( + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + <-req.Context().Done() + return nil, req.Context().Err() + }, + }, + }), + ) + cancelCtx, cancel := context.WithCancel(context.Background()) + cancel() + res, err := client.Chat.Completions.New(cancelCtx, openai.ChatCompletionNewParams{ + Messages: 
openai.F([]openai.ChatCompletionMessageParamUnion{openai.ChatCompletionUserMessageParam{ + Role: openai.F(openai.ChatCompletionUserMessageParamRoleUser), + Content: openai.F[openai.ChatCompletionUserMessageParamContentUnion](shared.UnionString("Say this is a test")), + }}), + Model: openai.F(openai.ChatModelGPT4o), + }) + if err == nil || res != nil { + t.Error("Expected there to be a cancel error and for the response to be nil") + } +} + +func TestContextCancelDelay(t *testing.T) { + client := openai.NewClient( + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + <-req.Context().Done() + return nil, req.Context().Err() + }, + }, + }), + ) + cancelCtx, cancel := context.WithTimeout(context.Background(), 2*time.Millisecond) + defer cancel() + res, err := client.Chat.Completions.New(cancelCtx, openai.ChatCompletionNewParams{ + Messages: openai.F([]openai.ChatCompletionMessageParamUnion{openai.ChatCompletionUserMessageParam{ + Role: openai.F(openai.ChatCompletionUserMessageParamRoleUser), + Content: openai.F[openai.ChatCompletionUserMessageParamContentUnion](shared.UnionString("Say this is a test")), + }}), + Model: openai.F(openai.ChatModelGPT4o), + }) + if err == nil || res != nil { + t.Error("expected there to be a cancel error and for the response to be nil") + } +} + +func TestContextDeadline(t *testing.T) { + testTimeout := time.After(3 * time.Second) + testDone := make(chan struct{}) + + deadline := time.Now().Add(100 * time.Millisecond) + deadlineCtx, cancel := context.WithDeadline(context.Background(), deadline) + defer cancel() + + go func() { + client := openai.NewClient( + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + <-req.Context().Done() + return nil, req.Context().Err() + }, + }, + }), + ) + res, err := client.Chat.Completions.New(deadlineCtx, openai.ChatCompletionNewParams{ + Messages: 
openai.F([]openai.ChatCompletionMessageParamUnion{openai.ChatCompletionUserMessageParam{ + Role: openai.F(openai.ChatCompletionUserMessageParamRoleUser), + Content: openai.F[openai.ChatCompletionUserMessageParamContentUnion](shared.UnionString("Say this is a test")), + }}), + Model: openai.F(openai.ChatModelGPT4o), + }) + if err == nil || res != nil { + t.Error("expected there to be a deadline error and for the response to be nil") + } + close(testDone) + }() + + select { + case <-testTimeout: + t.Fatal("client didn't finish in time") + case <-testDone: + if diff := time.Since(deadline); diff < -30*time.Millisecond || 30*time.Millisecond < diff { + t.Fatalf("client did not return within 30ms of context deadline, got %s", diff) + } + } +} diff --git a/completion.go b/completion.go new file mode 100644 index 0000000..afd9616 --- /dev/null +++ b/completion.go @@ -0,0 +1,383 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai + +import ( + "context" + "net/http" + + "github.com/openai/openai-go/internal/apijson" + "github.com/openai/openai-go/internal/param" + "github.com/openai/openai-go/internal/requestconfig" + "github.com/openai/openai-go/option" + "github.com/openai/openai-go/packages/ssestream" +) + +// CompletionService contains methods and other services that help with interacting +// with the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewCompletionService] method instead. +type CompletionService struct { + Options []option.RequestOption +} + +// NewCompletionService generates a new service that applies the given options to +// each request. These options are applied after the parent client's options (if +// there is one), and before any request-specific options. 
+func NewCompletionService(opts ...option.RequestOption) (r *CompletionService) { + r = &CompletionService{} + r.Options = opts + return +} + +// Creates a completion for the provided prompt and parameters. +func (r *CompletionService) New(ctx context.Context, body CompletionNewParams, opts ...option.RequestOption) (res *Completion, err error) { + opts = append(r.Options[:], opts...) + path := "completions" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Creates a completion for the provided prompt and parameters. +func (r *CompletionService) NewStreaming(ctx context.Context, body CompletionNewParams, opts ...option.RequestOption) (stream *ssestream.Stream[Completion]) { + var ( + raw *http.Response + err error + ) + opts = append(r.Options[:], opts...) + opts = append([]option.RequestOption{option.WithJSONSet("stream", true)}, opts...) + path := "completions" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &raw, opts...) + return ssestream.NewStream[Completion](ssestream.NewDecoder(raw), err) +} + +// Represents a completion response from the API. Note: both the streamed and +// non-streamed response objects share the same shape (unlike the chat endpoint). +type Completion struct { + // A unique identifier for the completion. + ID string `json:"id,required"` + // The list of completion choices the model generated for the input prompt. + Choices []CompletionChoice `json:"choices,required"` + // The Unix timestamp (in seconds) of when the completion was created. + Created int64 `json:"created,required"` + // The model used for completion. + Model string `json:"model,required"` + // The object type, which is always "text_completion" + Object CompletionObject `json:"object,required"` + // This fingerprint represents the backend configuration that the model runs with. 
+ // + // Can be used in conjunction with the `seed` request parameter to understand when + // backend changes have been made that might impact determinism. + SystemFingerprint string `json:"system_fingerprint"` + // Usage statistics for the completion request. + Usage CompletionUsage `json:"usage"` + JSON completionJSON `json:"-"` +} + +// completionJSON contains the JSON metadata for the struct [Completion] +type completionJSON struct { + ID apijson.Field + Choices apijson.Field + Created apijson.Field + Model apijson.Field + Object apijson.Field + SystemFingerprint apijson.Field + Usage apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *Completion) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r completionJSON) RawJSON() string { + return r.raw +} + +// The object type, which is always "text_completion" +type CompletionObject string + +const ( + CompletionObjectTextCompletion CompletionObject = "text_completion" +) + +func (r CompletionObject) IsKnown() bool { + switch r { + case CompletionObjectTextCompletion: + return true + } + return false +} + +type CompletionChoice struct { + // The reason the model stopped generating tokens. This will be `stop` if the model + // hit a natural stop point or a provided stop sequence, `length` if the maximum + // number of tokens specified in the request was reached, or `content_filter` if + // content was omitted due to a flag from our content filters. 
+ FinishReason CompletionChoiceFinishReason `json:"finish_reason,required"` + Index int64 `json:"index,required"` + Logprobs CompletionChoiceLogprobs `json:"logprobs,required,nullable"` + Text string `json:"text,required"` + JSON completionChoiceJSON `json:"-"` +} + +// completionChoiceJSON contains the JSON metadata for the struct +// [CompletionChoice] +type completionChoiceJSON struct { + FinishReason apijson.Field + Index apijson.Field + Logprobs apijson.Field + Text apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *CompletionChoice) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r completionChoiceJSON) RawJSON() string { + return r.raw +} + +// The reason the model stopped generating tokens. This will be `stop` if the model +// hit a natural stop point or a provided stop sequence, `length` if the maximum +// number of tokens specified in the request was reached, or `content_filter` if +// content was omitted due to a flag from our content filters. 
+type CompletionChoiceFinishReason string + +const ( + CompletionChoiceFinishReasonStop CompletionChoiceFinishReason = "stop" + CompletionChoiceFinishReasonLength CompletionChoiceFinishReason = "length" + CompletionChoiceFinishReasonContentFilter CompletionChoiceFinishReason = "content_filter" +) + +func (r CompletionChoiceFinishReason) IsKnown() bool { + switch r { + case CompletionChoiceFinishReasonStop, CompletionChoiceFinishReasonLength, CompletionChoiceFinishReasonContentFilter: + return true + } + return false +} + +type CompletionChoiceLogprobs struct { + TextOffset []int64 `json:"text_offset"` + TokenLogprobs []float64 `json:"token_logprobs"` + Tokens []string `json:"tokens"` + TopLogprobs []map[string]float64 `json:"top_logprobs"` + JSON completionChoiceLogprobsJSON `json:"-"` +} + +// completionChoiceLogprobsJSON contains the JSON metadata for the struct +// [CompletionChoiceLogprobs] +type completionChoiceLogprobsJSON struct { + TextOffset apijson.Field + TokenLogprobs apijson.Field + Tokens apijson.Field + TopLogprobs apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *CompletionChoiceLogprobs) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r completionChoiceLogprobsJSON) RawJSON() string { + return r.raw +} + +// Usage statistics for the completion request. +type CompletionUsage struct { + // Number of tokens in the generated completion. + CompletionTokens int64 `json:"completion_tokens,required"` + // Number of tokens in the prompt. + PromptTokens int64 `json:"prompt_tokens,required"` + // Total number of tokens used in the request (prompt + completion). 
+ TotalTokens int64 `json:"total_tokens,required"` + JSON completionUsageJSON `json:"-"` +} + +// completionUsageJSON contains the JSON metadata for the struct [CompletionUsage] +type completionUsageJSON struct { + CompletionTokens apijson.Field + PromptTokens apijson.Field + TotalTokens apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *CompletionUsage) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r completionUsageJSON) RawJSON() string { + return r.raw +} + +type CompletionNewParams struct { + // ID of the model to use. You can use the + // [List models](https://platform.openai.com/docs/api-reference/models/list) API to + // see all of your available models, or see our + // [Model overview](https://platform.openai.com/docs/models/overview) for + // descriptions of them. + Model param.Field[CompletionNewParamsModel] `json:"model,required"` + // The prompt(s) to generate completions for, encoded as a string, array of + // strings, array of tokens, or array of token arrays. + // + // Note that <|endoftext|> is the document separator that the model sees during + // training, so if a prompt is not specified the model will generate as if from the + // beginning of a new document. + Prompt param.Field[CompletionNewParamsPromptUnion] `json:"prompt,required"` + // Generates `best_of` completions server-side and returns the "best" (the one with + // the highest log probability per token). Results cannot be streamed. + // + // When used with `n`, `best_of` controls the number of candidate completions and + // `n` specifies how many to return – `best_of` must be greater than `n`. + // + // **Note:** Because this parameter generates many completions, it can quickly + // consume your token quota. Use carefully and ensure that you have reasonable + // settings for `max_tokens` and `stop`. 
+	BestOf param.Field[int64] `json:"best_of"`
+	// Echo back the prompt in addition to the completion
+	Echo param.Field[bool] `json:"echo"`
+	// Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+	// existing frequency in the text so far, decreasing the model's likelihood to
+	// repeat the same line verbatim.
+	//
+	// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+	FrequencyPenalty param.Field[float64] `json:"frequency_penalty"`
+	// Modify the likelihood of specified tokens appearing in the completion.
+	//
+	// Accepts a JSON object that maps tokens (specified by their token ID in the GPT
+	// tokenizer) to an associated bias value from -100 to 100. You can use this
+	// [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+	// Mathematically, the bias is added to the logits generated by the model prior to
+	// sampling. The exact effect will vary per model, but values between -1 and 1
+	// should decrease or increase likelihood of selection; values like -100 or 100
+	// should result in a ban or exclusive selection of the relevant token.
+	//
+	// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
+	// from being generated.
+	LogitBias param.Field[map[string]int64] `json:"logit_bias"`
+	// Include the log probabilities on the `logprobs` most likely output tokens, as
+	// well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
+	// list of the 5 most likely tokens. The API will always return the `logprob` of
+	// the sampled token, so there may be up to `logprobs+1` elements in the response.
+	//
+	// The maximum value for `logprobs` is 5.
+	Logprobs param.Field[int64] `json:"logprobs"`
+	// The maximum number of [tokens](/tokenizer) that can be generated in the
+	// completion.
+ // + // The token count of your prompt plus `max_tokens` cannot exceed the model's + // context length. + // [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + // for counting tokens. + MaxTokens param.Field[int64] `json:"max_tokens"` + // How many completions to generate for each prompt. + // + // **Note:** Because this parameter generates many completions, it can quickly + // consume your token quota. Use carefully and ensure that you have reasonable + // settings for `max_tokens` and `stop`. + N param.Field[int64] `json:"n"` + // Number between -2.0 and 2.0. Positive values penalize new tokens based on + // whether they appear in the text so far, increasing the model's likelihood to + // talk about new topics. + // + // [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + PresencePenalty param.Field[float64] `json:"presence_penalty"` + // If specified, our system will make a best effort to sample deterministically, + // such that repeated requests with the same `seed` and parameters should return + // the same result. + // + // Determinism is not guaranteed, and you should refer to the `system_fingerprint` + // response parameter to monitor changes in the backend. + Seed param.Field[int64] `json:"seed"` + // Up to 4 sequences where the API will stop generating further tokens. The + // returned text will not contain the stop sequence. + Stop param.Field[CompletionNewParamsStopUnion] `json:"stop"` + // Options for streaming response. Only set this when you set `stream: true`. + StreamOptions param.Field[ChatCompletionStreamOptionsParam] `json:"stream_options"` + // The suffix that comes after a completion of inserted text. + // + // This parameter is only supported for `gpt-3.5-turbo-instruct`. + Suffix param.Field[string] `json:"suffix"` + // What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will + // make the output more random, while lower values like 0.2 will make it more + // focused and deterministic. + // + // We generally recommend altering this or `top_p` but not both. + Temperature param.Field[float64] `json:"temperature"` + // An alternative to sampling with temperature, called nucleus sampling, where the + // model considers the results of the tokens with top_p probability mass. So 0.1 + // means only the tokens comprising the top 10% probability mass are considered. + // + // We generally recommend altering this or `temperature` but not both. + TopP param.Field[float64] `json:"top_p"` + // A unique identifier representing your end-user, which can help OpenAI to monitor + // and detect abuse. + // [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + User param.Field[string] `json:"user"` +} + +func (r CompletionNewParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type CompletionNewParamsModel string + +const ( + CompletionNewParamsModelGPT3_5TurboInstruct CompletionNewParamsModel = "gpt-3.5-turbo-instruct" + CompletionNewParamsModelDavinci002 CompletionNewParamsModel = "davinci-002" + CompletionNewParamsModelBabbage002 CompletionNewParamsModel = "babbage-002" +) + +func (r CompletionNewParamsModel) IsKnown() bool { + switch r { + case CompletionNewParamsModelGPT3_5TurboInstruct, CompletionNewParamsModelDavinci002, CompletionNewParamsModelBabbage002: + return true + } + return false +} + +// The prompt(s) to generate completions for, encoded as a string, array of +// strings, array of tokens, or array of token arrays. +// +// Note that <|endoftext|> is the document separator that the model sees during +// training, so if a prompt is not specified the model will generate as if from the +// beginning of a new document. 
+// +// Satisfied by [shared.UnionString], [CompletionNewParamsPromptArrayOfStrings], +// [CompletionNewParamsPromptArrayOfTokens], +// [CompletionNewParamsPromptArrayOfTokenArrays]. +type CompletionNewParamsPromptUnion interface { + ImplementsCompletionNewParamsPromptUnion() +} + +type CompletionNewParamsPromptArrayOfStrings []string + +func (r CompletionNewParamsPromptArrayOfStrings) ImplementsCompletionNewParamsPromptUnion() {} + +type CompletionNewParamsPromptArrayOfTokens []int64 + +func (r CompletionNewParamsPromptArrayOfTokens) ImplementsCompletionNewParamsPromptUnion() {} + +type CompletionNewParamsPromptArrayOfTokenArrays [][]int64 + +func (r CompletionNewParamsPromptArrayOfTokenArrays) ImplementsCompletionNewParamsPromptUnion() {} + +// Up to 4 sequences where the API will stop generating further tokens. The +// returned text will not contain the stop sequence. +// +// Satisfied by [shared.UnionString], [CompletionNewParamsStopArray]. +type CompletionNewParamsStopUnion interface { + ImplementsCompletionNewParamsStopUnion() +} + +type CompletionNewParamsStopArray []string + +func (r CompletionNewParamsStopArray) ImplementsCompletionNewParamsStopUnion() {} diff --git a/completion_test.go b/completion_test.go new file mode 100644 index 0000000..45df624 --- /dev/null +++ b/completion_test.go @@ -0,0 +1,59 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package openai_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" + "github.com/openai/openai-go/shared" +) + +func TestCompletionNewWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Completions.New(context.TODO(), openai.CompletionNewParams{ + Model: openai.F(openai.CompletionNewParamsModelGPT3_5TurboInstruct), + Prompt: openai.F[openai.CompletionNewParamsPromptUnion](shared.UnionString("This is a test.")), + BestOf: openai.F(int64(0)), + Echo: openai.F(true), + FrequencyPenalty: openai.F(-2.000000), + LogitBias: openai.F(map[string]int64{ + "foo": int64(0), + }), + Logprobs: openai.F(int64(0)), + MaxTokens: openai.F(int64(16)), + N: openai.F(int64(1)), + PresencePenalty: openai.F(-2.000000), + Seed: openai.F(int64(-9007199254740991)), + Stop: openai.F[openai.CompletionNewParamsStopUnion](shared.UnionString("\n")), + StreamOptions: openai.F(openai.ChatCompletionStreamOptionsParam{ + IncludeUsage: openai.F(true), + }), + Suffix: openai.F("test."), + Temperature: openai.F(1.000000), + TopP: openai.F(1.000000), + User: openai.F("user-1234"), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/embedding.go b/embedding.go new file mode 100644 index 0000000..30c9e62 --- /dev/null +++ b/embedding.go @@ -0,0 +1,248 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+
+package openai
+
+import (
+	"context"
+	"net/http"
+
+	"github.com/openai/openai-go/internal/apijson"
+	"github.com/openai/openai-go/internal/param"
+	"github.com/openai/openai-go/internal/requestconfig"
+	"github.com/openai/openai-go/option"
+)
+
+// EmbeddingService contains methods and other services that help with interacting
+// with the openai API.
+//
+// Note, unlike clients, this service does not read variables from the environment
+// automatically. You should not instantiate this service directly; instead, use
+// the [NewEmbeddingService] method to construct one.
+type EmbeddingService struct {
+	Options []option.RequestOption
+}
+
+// NewEmbeddingService generates a new service that applies the given options to
+// each request. These options are applied after the parent client's options (if
+// there is one), and before any request-specific options.
+func NewEmbeddingService(opts ...option.RequestOption) (r *EmbeddingService) {
+	r = &EmbeddingService{}
+	r.Options = opts
+	return
+}
+
+// Creates an embedding vector representing the input text.
+func (r *EmbeddingService) New(ctx context.Context, body EmbeddingNewParams, opts ...option.RequestOption) (res *CreateEmbeddingResponse, err error) {
+	opts = append(r.Options[:], opts...)
+	path := "embeddings"
+	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...)
+	return
+}
+
+type CreateEmbeddingResponse struct {
+	// The list of embeddings generated by the model.
+	Data []Embedding `json:"data,required"`
+	// The name of the model used to generate the embedding.
+	Model string `json:"model,required"`
+	// The object type, which is always "list".
+	Object CreateEmbeddingResponseObject `json:"object,required"`
+	// The usage information for the request.
+ Usage CreateEmbeddingResponseUsage `json:"usage,required"` + JSON createEmbeddingResponseJSON `json:"-"` +} + +// createEmbeddingResponseJSON contains the JSON metadata for the struct +// [CreateEmbeddingResponse] +type createEmbeddingResponseJSON struct { + Data apijson.Field + Model apijson.Field + Object apijson.Field + Usage apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *CreateEmbeddingResponse) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r createEmbeddingResponseJSON) RawJSON() string { + return r.raw +} + +// The object type, which is always "list". +type CreateEmbeddingResponseObject string + +const ( + CreateEmbeddingResponseObjectList CreateEmbeddingResponseObject = "list" +) + +func (r CreateEmbeddingResponseObject) IsKnown() bool { + switch r { + case CreateEmbeddingResponseObjectList: + return true + } + return false +} + +// The usage information for the request. +type CreateEmbeddingResponseUsage struct { + // The number of tokens used by the prompt. + PromptTokens int64 `json:"prompt_tokens,required"` + // The total number of tokens used by the request. + TotalTokens int64 `json:"total_tokens,required"` + JSON createEmbeddingResponseUsageJSON `json:"-"` +} + +// createEmbeddingResponseUsageJSON contains the JSON metadata for the struct +// [CreateEmbeddingResponseUsage] +type createEmbeddingResponseUsageJSON struct { + PromptTokens apijson.Field + TotalTokens apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *CreateEmbeddingResponseUsage) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r createEmbeddingResponseUsageJSON) RawJSON() string { + return r.raw +} + +// Represents an embedding vector returned by embedding endpoint. +type Embedding struct { + // The embedding vector, which is a list of floats. 
The length of vector depends on + // the model as listed in the + // [embedding guide](https://platform.openai.com/docs/guides/embeddings). + Embedding []float64 `json:"embedding,required"` + // The index of the embedding in the list of embeddings. + Index int64 `json:"index,required"` + // The object type, which is always "embedding". + Object EmbeddingObject `json:"object,required"` + JSON embeddingJSON `json:"-"` +} + +// embeddingJSON contains the JSON metadata for the struct [Embedding] +type embeddingJSON struct { + Embedding apijson.Field + Index apijson.Field + Object apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *Embedding) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r embeddingJSON) RawJSON() string { + return r.raw +} + +// The object type, which is always "embedding". +type EmbeddingObject string + +const ( + EmbeddingObjectEmbedding EmbeddingObject = "embedding" +) + +func (r EmbeddingObject) IsKnown() bool { + switch r { + case EmbeddingObjectEmbedding: + return true + } + return false +} + +type EmbeddingNewParams struct { + // Input text to embed, encoded as a string or array of tokens. To embed multiple + // inputs in a single request, pass an array of strings or array of token arrays. + // The input must not exceed the max input tokens for the model (8192 tokens for + // `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 + // dimensions or less. + // [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + // for counting tokens. + Input param.Field[EmbeddingNewParamsInputUnion] `json:"input,required"` + // ID of the model to use. You can use the + // [List models](https://platform.openai.com/docs/api-reference/models/list) API to + // see all of your available models, or see our + // [Model overview](https://platform.openai.com/docs/models/overview) for + // descriptions of them. 
+ Model param.Field[EmbeddingNewParamsModel] `json:"model,required"` + // The number of dimensions the resulting output embeddings should have. Only + // supported in `text-embedding-3` and later models. + Dimensions param.Field[int64] `json:"dimensions"` + // The format to return the embeddings in. Can be either `float` or + // [`base64`](https://pypi.org/project/pybase64/). + EncodingFormat param.Field[EmbeddingNewParamsEncodingFormat] `json:"encoding_format"` + // A unique identifier representing your end-user, which can help OpenAI to monitor + // and detect abuse. + // [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + User param.Field[string] `json:"user"` +} + +func (r EmbeddingNewParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// Input text to embed, encoded as a string or array of tokens. To embed multiple +// inputs in a single request, pass an array of strings or array of token arrays. +// The input must not exceed the max input tokens for the model (8192 tokens for +// `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 +// dimensions or less. +// [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) +// for counting tokens. +// +// Satisfied by [shared.UnionString], [EmbeddingNewParamsInputArrayOfStrings], +// [EmbeddingNewParamsInputArrayOfTokens], +// [EmbeddingNewParamsInputArrayOfTokenArrays]. 
+type EmbeddingNewParamsInputUnion interface { + ImplementsEmbeddingNewParamsInputUnion() +} + +type EmbeddingNewParamsInputArrayOfStrings []string + +func (r EmbeddingNewParamsInputArrayOfStrings) ImplementsEmbeddingNewParamsInputUnion() {} + +type EmbeddingNewParamsInputArrayOfTokens []int64 + +func (r EmbeddingNewParamsInputArrayOfTokens) ImplementsEmbeddingNewParamsInputUnion() {} + +type EmbeddingNewParamsInputArrayOfTokenArrays [][]int64 + +func (r EmbeddingNewParamsInputArrayOfTokenArrays) ImplementsEmbeddingNewParamsInputUnion() {} + +type EmbeddingNewParamsModel string + +const ( + EmbeddingNewParamsModelTextEmbeddingAda002 EmbeddingNewParamsModel = "text-embedding-ada-002" + EmbeddingNewParamsModelTextEmbedding3Small EmbeddingNewParamsModel = "text-embedding-3-small" + EmbeddingNewParamsModelTextEmbedding3Large EmbeddingNewParamsModel = "text-embedding-3-large" +) + +func (r EmbeddingNewParamsModel) IsKnown() bool { + switch r { + case EmbeddingNewParamsModelTextEmbeddingAda002, EmbeddingNewParamsModelTextEmbedding3Small, EmbeddingNewParamsModelTextEmbedding3Large: + return true + } + return false +} + +// The format to return the embeddings in. Can be either `float` or +// [`base64`](https://pypi.org/project/pybase64/). +type EmbeddingNewParamsEncodingFormat string + +const ( + EmbeddingNewParamsEncodingFormatFloat EmbeddingNewParamsEncodingFormat = "float" + EmbeddingNewParamsEncodingFormatBase64 EmbeddingNewParamsEncodingFormat = "base64" +) + +func (r EmbeddingNewParamsEncodingFormat) IsKnown() bool { + switch r { + case EmbeddingNewParamsEncodingFormatFloat, EmbeddingNewParamsEncodingFormatBase64: + return true + } + return false +} diff --git a/embedding_test.go b/embedding_test.go new file mode 100644 index 0000000..9296430 --- /dev/null +++ b/embedding_test.go @@ -0,0 +1,43 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package openai_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" + "github.com/openai/openai-go/shared" +) + +func TestEmbeddingNewWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Embeddings.New(context.TODO(), openai.EmbeddingNewParams{ + Input: openai.F[openai.EmbeddingNewParamsInputUnion](shared.UnionString("The quick brown fox jumped over the lazy dog")), + Model: openai.F(openai.EmbeddingNewParamsModelTextEmbeddingAda002), + Dimensions: openai.F(int64(1)), + EncodingFormat: openai.F(openai.EmbeddingNewParamsEncodingFormatFloat), + User: openai.F("user-1234"), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/examples/.keep b/examples/.keep new file mode 100644 index 0000000..d8c73e9 --- /dev/null +++ b/examples/.keep @@ -0,0 +1,4 @@ +File generated from our OpenAPI spec by Stainless. + +This directory can be used to store example files demonstrating usage of this SDK. +It is ignored by Stainless code generation and its content (other than this keep file) won't be touched. \ No newline at end of file diff --git a/field.go b/field.go new file mode 100644 index 0000000..730a024 --- /dev/null +++ b/field.go @@ -0,0 +1,50 @@ +package openai + +import ( + "github.com/openai/openai-go/internal/param" + "io" +) + +// F is a param field helper used to initialize a [param.Field] generic struct. +// This helps specify null, zero values, and overrides, as well as normal values. 
+// You can read more about this in our [README].
+//
+// [README]: https://pkg.go.dev/github.com/openai/openai-go#readme-request-fields
+func F[T any](value T) param.Field[T] { return param.Field[T]{Value: value, Present: true} }
+
+// Null is a param field helper which explicitly sends null to the API.
+func Null[T any]() param.Field[T] { return param.Field[T]{Null: true, Present: true} }
+
+// Raw is a param field helper for specifying values for fields when the
+// type you are looking to send is different from the type that is specified in
+// the SDK. For example, if the type of the field is an integer, but you want
+// to send a float, you could do that by setting the corresponding field with
+// Raw[int](0.5).
+func Raw[T any](value any) param.Field[T] { return param.Field[T]{Raw: value, Present: true} }
+
+// Int is a param field helper which helps specify integers. This is
+// particularly helpful when specifying integer constants for fields.
+func Int(value int64) param.Field[int64] { return F(value) }
+
+// String is a param field helper which helps specify strings.
+func String(value string) param.Field[string] { return F(value) }
+
+// Float is a param field helper which helps specify floats.
+func Float(value float64) param.Field[float64] { return F(value) }
+
+// Bool is a param field helper which helps specify bools.
+func Bool(value bool) param.Field[bool] { return F(value) }
+
+// FileParam is a param field helper which helps specify files with a MIME content-type.
+func FileParam(reader io.Reader, filename string, contentType string) param.Field[io.Reader] { + return F[io.Reader](&file{reader, filename, contentType}) +} + +type file struct { + io.Reader + name string + contentType string +} + +func (f *file) Name() string { return f.name } +func (f *file) ContentType() string { return f.contentType } diff --git a/file.go b/file.go new file mode 100644 index 0000000..76fda1e --- /dev/null +++ b/file.go @@ -0,0 +1,337 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "mime/multipart" + "net/http" + "net/url" + + "github.com/openai/openai-go/internal/apiform" + "github.com/openai/openai-go/internal/apijson" + "github.com/openai/openai-go/internal/apiquery" + "github.com/openai/openai-go/internal/pagination" + "github.com/openai/openai-go/internal/param" + "github.com/openai/openai-go/internal/requestconfig" + "github.com/openai/openai-go/option" +) + +// FileService contains methods and other services that help with interacting with +// the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewFileService] method instead. +type FileService struct { + Options []option.RequestOption +} + +// NewFileService generates a new service that applies the given options to each +// request. These options are applied after the parent client's options (if there +// is one), and before any request-specific options. +func NewFileService(opts ...option.RequestOption) (r *FileService) { + r = &FileService{} + r.Options = opts + return +} + +// Upload a file that can be used across various endpoints. Individual files can be +// up to 512 MB, and the size of all files uploaded by one organization can be up +// to 100 GB. 
+// +// The Assistants API supports files up to 2 million tokens and of specific file +// types. See the +// [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for +// details. +// +// The Fine-tuning API only supports `.jsonl` files. The input also has certain +// required formats for fine-tuning +// [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or +// [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) +// models. +// +// The Batch API only supports `.jsonl` files up to 100 MB in size. The input also +// has a specific required +// [format](https://platform.openai.com/docs/api-reference/batch/request-input). +// +// Please [contact us](https://help.openai.com/) if you need to increase these +// storage limits. +func (r *FileService) New(ctx context.Context, body FileNewParams, opts ...option.RequestOption) (res *FileObject, err error) { + opts = append(r.Options[:], opts...) + path := "files" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Returns information about a specific file. +func (r *FileService) Get(ctx context.Context, fileID string, opts ...option.RequestOption) (res *FileObject, err error) { + opts = append(r.Options[:], opts...) + if fileID == "" { + err = errors.New("missing required file_id parameter") + return + } + path := fmt.Sprintf("files/%s", fileID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +// Returns a list of files that belong to the user's organization. +func (r *FileService) List(ctx context.Context, query FileListParams, opts ...option.RequestOption) (res *pagination.Page[FileObject], err error) { + var raw *http.Response + opts = append(r.Options[:], opts...) + opts = append([]option.RequestOption{option.WithResponseInto(&raw)}, opts...) 
+ path := "files" + cfg, err := requestconfig.NewRequestConfig(ctx, http.MethodGet, path, query, &res, opts...) + if err != nil { + return nil, err + } + err = cfg.Execute() + if err != nil { + return nil, err + } + res.SetPageConfig(cfg, raw) + return res, nil +} + +// Returns a list of files that belong to the user's organization. +func (r *FileService) ListAutoPaging(ctx context.Context, query FileListParams, opts ...option.RequestOption) *pagination.PageAutoPager[FileObject] { + return pagination.NewPageAutoPager(r.List(ctx, query, opts...)) +} + +// Delete a file. +func (r *FileService) Delete(ctx context.Context, fileID string, opts ...option.RequestOption) (res *FileDeleted, err error) { + opts = append(r.Options[:], opts...) + if fileID == "" { + err = errors.New("missing required file_id parameter") + return + } + path := fmt.Sprintf("files/%s", fileID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodDelete, path, nil, &res, opts...) + return +} + +// Returns the contents of the specified file. +func (r *FileService) Content(ctx context.Context, fileID string, opts ...option.RequestOption) (res *http.Response, err error) { + opts = append(r.Options[:], opts...) + opts = append([]option.RequestOption{option.WithHeader("Accept", "application/binary")}, opts...) + if fileID == "" { + err = errors.New("missing required file_id parameter") + return + } + path := fmt.Sprintf("files/%s/content", fileID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) 
+ return +} + +type FileDeleted struct { + ID string `json:"id,required"` + Deleted bool `json:"deleted,required"` + Object FileDeletedObject `json:"object,required"` + JSON fileDeletedJSON `json:"-"` +} + +// fileDeletedJSON contains the JSON metadata for the struct [FileDeleted] +type fileDeletedJSON struct { + ID apijson.Field + Deleted apijson.Field + Object apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FileDeleted) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r fileDeletedJSON) RawJSON() string { + return r.raw +} + +type FileDeletedObject string + +const ( + FileDeletedObjectFile FileDeletedObject = "file" +) + +func (r FileDeletedObject) IsKnown() bool { + switch r { + case FileDeletedObjectFile: + return true + } + return false +} + +// The `File` object represents a document that has been uploaded to OpenAI. +type FileObject struct { + // The file identifier, which can be referenced in the API endpoints. + ID string `json:"id,required"` + // The size of the file, in bytes. + Bytes int64 `json:"bytes,required"` + // The Unix timestamp (in seconds) for when the file was created. + CreatedAt int64 `json:"created_at,required"` + // The name of the file. + Filename string `json:"filename,required"` + // The object type, which is always `file`. + Object FileObjectObject `json:"object,required"` + // The intended purpose of the file. Supported values are `assistants`, + // `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` + // and `vision`. + Purpose FileObjectPurpose `json:"purpose,required"` + // Deprecated. The current status of the file, which can be either `uploaded`, + // `processed`, or `error`. + Status FileObjectStatus `json:"status,required"` + // Deprecated. For details on why a fine-tuning training file failed validation, + // see the `error` field on `fine_tuning.job`. 
+ StatusDetails string `json:"status_details"` + JSON fileObjectJSON `json:"-"` +} + +// fileObjectJSON contains the JSON metadata for the struct [FileObject] +type fileObjectJSON struct { + ID apijson.Field + Bytes apijson.Field + CreatedAt apijson.Field + Filename apijson.Field + Object apijson.Field + Purpose apijson.Field + Status apijson.Field + StatusDetails apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FileObject) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r fileObjectJSON) RawJSON() string { + return r.raw +} + +// The object type, which is always `file`. +type FileObjectObject string + +const ( + FileObjectObjectFile FileObjectObject = "file" +) + +func (r FileObjectObject) IsKnown() bool { + switch r { + case FileObjectObjectFile: + return true + } + return false +} + +// The intended purpose of the file. Supported values are `assistants`, +// `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` +// and `vision`. +type FileObjectPurpose string + +const ( + FileObjectPurposeAssistants FileObjectPurpose = "assistants" + FileObjectPurposeAssistantsOutput FileObjectPurpose = "assistants_output" + FileObjectPurposeBatch FileObjectPurpose = "batch" + FileObjectPurposeBatchOutput FileObjectPurpose = "batch_output" + FileObjectPurposeFineTune FileObjectPurpose = "fine-tune" + FileObjectPurposeFineTuneResults FileObjectPurpose = "fine-tune-results" + FileObjectPurposeVision FileObjectPurpose = "vision" +) + +func (r FileObjectPurpose) IsKnown() bool { + switch r { + case FileObjectPurposeAssistants, FileObjectPurposeAssistantsOutput, FileObjectPurposeBatch, FileObjectPurposeBatchOutput, FileObjectPurposeFineTune, FileObjectPurposeFineTuneResults, FileObjectPurposeVision: + return true + } + return false +} + +// Deprecated. The current status of the file, which can be either `uploaded`, +// `processed`, or `error`. 
+type FileObjectStatus string
+
+const (
+	FileObjectStatusUploaded  FileObjectStatus = "uploaded"
+	FileObjectStatusProcessed FileObjectStatus = "processed"
+	FileObjectStatusError     FileObjectStatus = "error"
+)
+
+// IsKnown reports whether r is one of the enum values declared above.
+// Unrecognized values (e.g. from a newer API version) return false but
+// still round-trip through JSON unchanged.
+func (r FileObjectStatus) IsKnown() bool {
+	switch r {
+	case FileObjectStatusUploaded, FileObjectStatusProcessed, FileObjectStatusError:
+		return true
+	}
+	return false
+}
+
+type FileNewParams struct {
+	// The File object (not file name) to be uploaded.
+	File param.Field[io.Reader] `json:"file,required" format:"binary"`
+	// The intended purpose of the uploaded file.
+	//
+	// Use "assistants" for
+	// [Assistants](https://platform.openai.com/docs/api-reference/assistants) and
+	// [Message](https://platform.openai.com/docs/api-reference/messages) files,
+	// "vision" for Assistants image file inputs, "batch" for
+	// [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for
+	// [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning).
+	Purpose param.Field[FileNewParamsPurpose] `json:"purpose,required"`
+}
+
+// MarshalMultipart serializes r into a multipart/form-data body, returning the
+// encoded bytes and the Content-Type header value (which carries the boundary).
+func (r FileNewParams) MarshalMultipart() (data []byte, contentType string, err error) {
+	buf := bytes.NewBuffer(nil)
+	writer := multipart.NewWriter(buf)
+	err = apiform.MarshalRoot(r, writer)
+	if err != nil {
+		// Close the writer even on failure so its resources are released;
+		// the marshal error takes precedence over any close error.
+		writer.Close()
+		return nil, "", err
+	}
+	// Closing writes the terminating boundary; the body is incomplete until then.
+	err = writer.Close()
+	if err != nil {
+		return nil, "", err
+	}
+	return buf.Bytes(), writer.FormDataContentType(), nil
+}
+
+// The intended purpose of the uploaded file.
+//
+// Use "assistants" for
+// [Assistants](https://platform.openai.com/docs/api-reference/assistants) and
+// [Message](https://platform.openai.com/docs/api-reference/messages) files,
+// "vision" for Assistants image file inputs, "batch" for
+// [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for
+// [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning).
+type FileNewParamsPurpose string + +const ( + FileNewParamsPurposeAssistants FileNewParamsPurpose = "assistants" + FileNewParamsPurposeBatch FileNewParamsPurpose = "batch" + FileNewParamsPurposeFineTune FileNewParamsPurpose = "fine-tune" + FileNewParamsPurposeVision FileNewParamsPurpose = "vision" +) + +func (r FileNewParamsPurpose) IsKnown() bool { + switch r { + case FileNewParamsPurposeAssistants, FileNewParamsPurposeBatch, FileNewParamsPurposeFineTune, FileNewParamsPurposeVision: + return true + } + return false +} + +type FileListParams struct { + // Only return files with the given purpose. + Purpose param.Field[string] `query:"purpose"` +} + +// URLQuery serializes [FileListParams]'s query parameters as `url.Values`. +func (r FileListParams) URLQuery() (v url.Values) { + return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ + ArrayFormat: apiquery.ArrayQueryFormatComma, + NestedFormat: apiquery.NestedQueryFormatBrackets, + }) +} diff --git a/file_test.go b/file_test.go new file mode 100644 index 0000000..c5b5557 --- /dev/null +++ b/file_test.go @@ -0,0 +1,145 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package openai_test + +import ( + "bytes" + "context" + "errors" + "io" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" +) + +func TestFileNew(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Files.New(context.TODO(), openai.FileNewParams{ + File: openai.F(io.Reader(bytes.NewBuffer([]byte("some file contents")))), + Purpose: openai.F(openai.FileNewParamsPurposeAssistants), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestFileGet(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Files.Get(context.TODO(), "file_id") + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestFileListWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Files.List(context.TODO(), openai.FileListParams{ + Purpose: openai.F("purpose"), + }) + if err != nil { + var apierr *openai.Error + if 
errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestFileDelete(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Files.Delete(context.TODO(), "file_id") + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestFileContent(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + w.Write([]byte("abc")) + })) + defer server.Close() + baseURL := server.URL + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + resp, err := client.Files.Content(context.TODO(), "file_id") + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } + defer resp.Body.Close() + + b, err := io.ReadAll(resp.Body) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } + if !bytes.Equal(b, []byte("abc")) { + t.Fatalf("return value not %s: %s", "abc", b) + } +} diff --git a/finetuning.go b/finetuning.go new file mode 100644 index 0000000..88fa9c5 --- /dev/null +++ b/finetuning.go @@ -0,0 +1,28 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai + +import ( + "github.com/openai/openai-go/option" +) + +// FineTuningService contains methods and other services that help with interacting +// with the openai API. 
+// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewFineTuningService] method instead. +type FineTuningService struct { + Options []option.RequestOption + Jobs *FineTuningJobService +} + +// NewFineTuningService generates a new service that applies the given options to +// each request. These options are applied after the parent client's options (if +// there is one), and before any request-specific options. +func NewFineTuningService(opts ...option.RequestOption) (r *FineTuningService) { + r = &FineTuningService{} + r.Options = opts + r.Jobs = NewFineTuningJobService(opts...) + return +} diff --git a/finetuningjob.go b/finetuningjob.go new file mode 100644 index 0000000..fe162ed --- /dev/null +++ b/finetuningjob.go @@ -0,0 +1,749 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "reflect" + + "github.com/openai/openai-go/internal/apijson" + "github.com/openai/openai-go/internal/apiquery" + "github.com/openai/openai-go/internal/pagination" + "github.com/openai/openai-go/internal/param" + "github.com/openai/openai-go/internal/requestconfig" + "github.com/openai/openai-go/option" + "github.com/openai/openai-go/shared" + "github.com/tidwall/gjson" +) + +// FineTuningJobService contains methods and other services that help with +// interacting with the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewFineTuningJobService] method instead. +type FineTuningJobService struct { + Options []option.RequestOption + Checkpoints *FineTuningJobCheckpointService +} + +// NewFineTuningJobService generates a new service that applies the given options +// to each request. 
These options are applied after the parent client's options (if +// there is one), and before any request-specific options. +func NewFineTuningJobService(opts ...option.RequestOption) (r *FineTuningJobService) { + r = &FineTuningJobService{} + r.Options = opts + r.Checkpoints = NewFineTuningJobCheckpointService(opts...) + return +} + +// Creates a fine-tuning job which begins the process of creating a new model from +// a given dataset. +// +// Response includes details of the enqueued job including job status and the name +// of the fine-tuned models once complete. +// +// [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) +func (r *FineTuningJobService) New(ctx context.Context, body FineTuningJobNewParams, opts ...option.RequestOption) (res *FineTuningJob, err error) { + opts = append(r.Options[:], opts...) + path := "fine_tuning/jobs" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Get info about a fine-tuning job. +// +// [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) +func (r *FineTuningJobService) Get(ctx context.Context, fineTuningJobID string, opts ...option.RequestOption) (res *FineTuningJob, err error) { + opts = append(r.Options[:], opts...) + if fineTuningJobID == "" { + err = errors.New("missing required fine_tuning_job_id parameter") + return + } + path := fmt.Sprintf("fine_tuning/jobs/%s", fineTuningJobID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +// List your organization's fine-tuning jobs +func (r *FineTuningJobService) List(ctx context.Context, query FineTuningJobListParams, opts ...option.RequestOption) (res *pagination.CursorPage[FineTuningJob], err error) { + var raw *http.Response + opts = append(r.Options[:], opts...) + opts = append([]option.RequestOption{option.WithResponseInto(&raw)}, opts...) 
+	path := "fine_tuning/jobs"
+	cfg, err := requestconfig.NewRequestConfig(ctx, http.MethodGet, path, query, &res, opts...)
+	if err != nil {
+		return nil, err
+	}
+	err = cfg.Execute()
+	if err != nil {
+		return nil, err
+	}
+	// The page keeps the request config and raw response so it can fetch
+	// subsequent pages on demand.
+	res.SetPageConfig(cfg, raw)
+	return res, nil
+}
+
+// List your organization's fine-tuning jobs
+func (r *FineTuningJobService) ListAutoPaging(ctx context.Context, query FineTuningJobListParams, opts ...option.RequestOption) *pagination.CursorPageAutoPager[FineTuningJob] {
+	return pagination.NewCursorPageAutoPager(r.List(ctx, query, opts...))
+}
+
+// Immediately cancel a fine-tune job.
+func (r *FineTuningJobService) Cancel(ctx context.Context, fineTuningJobID string, opts ...option.RequestOption) (res *FineTuningJob, err error) {
+	// Per-call options are applied after the service-level defaults.
+	opts = append(r.Options[:], opts...)
+	if fineTuningJobID == "" {
+		err = errors.New("missing required fine_tuning_job_id parameter")
+		return
+	}
+	path := fmt.Sprintf("fine_tuning/jobs/%s/cancel", fineTuningJobID)
+	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, nil, &res, opts...)
+	return
+}
+
+// Get status updates for a fine-tuning job.
+func (r *FineTuningJobService) ListEvents(ctx context.Context, fineTuningJobID string, query FineTuningJobListEventsParams, opts ...option.RequestOption) (res *pagination.CursorPage[FineTuningJobEvent], err error) {
+	var raw *http.Response
+	opts = append(r.Options[:], opts...)
+	// Capture the raw *http.Response (prepended so later options can override);
+	// it is handed to the page below for cursor pagination.
+	opts = append([]option.RequestOption{option.WithResponseInto(&raw)}, opts...)
+	if fineTuningJobID == "" {
+		err = errors.New("missing required fine_tuning_job_id parameter")
+		return
+	}
+	path := fmt.Sprintf("fine_tuning/jobs/%s/events", fineTuningJobID)
+	cfg, err := requestconfig.NewRequestConfig(ctx, http.MethodGet, path, query, &res, opts...)
+	if err != nil {
+		return nil, err
+	}
+	err = cfg.Execute()
+	if err != nil {
+		return nil, err
+	}
+	res.SetPageConfig(cfg, raw)
+	return res, nil
+}
+
+// Get status updates for a fine-tuning job.
+func (r *FineTuningJobService) ListEventsAutoPaging(ctx context.Context, fineTuningJobID string, query FineTuningJobListEventsParams, opts ...option.RequestOption) *pagination.CursorPageAutoPager[FineTuningJobEvent] { + return pagination.NewCursorPageAutoPager(r.ListEvents(ctx, fineTuningJobID, query, opts...)) +} + +// The `fine_tuning.job` object represents a fine-tuning job that has been created +// through the API. +type FineTuningJob struct { + // The object identifier, which can be referenced in the API endpoints. + ID string `json:"id,required"` + // The Unix timestamp (in seconds) for when the fine-tuning job was created. + CreatedAt int64 `json:"created_at,required"` + // For fine-tuning jobs that have `failed`, this will contain more information on + // the cause of the failure. + Error FineTuningJobError `json:"error,required,nullable"` + // The name of the fine-tuned model that is being created. The value will be null + // if the fine-tuning job is still running. + FineTunedModel string `json:"fine_tuned_model,required,nullable"` + // The Unix timestamp (in seconds) for when the fine-tuning job was finished. The + // value will be null if the fine-tuning job is still running. + FinishedAt int64 `json:"finished_at,required,nullable"` + // The hyperparameters used for the fine-tuning job. See the + // [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for + // more details. + Hyperparameters FineTuningJobHyperparameters `json:"hyperparameters,required"` + // The base model that is being fine-tuned. + Model string `json:"model,required"` + // The object type, which is always "fine_tuning.job". + Object FineTuningJobObject `json:"object,required"` + // The organization that owns the fine-tuning job. + OrganizationID string `json:"organization_id,required"` + // The compiled results file ID(s) for the fine-tuning job. 
You can retrieve the + // results with the + // [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + ResultFiles []string `json:"result_files,required"` + // The seed used for the fine-tuning job. + Seed int64 `json:"seed,required"` + // The current status of the fine-tuning job, which can be either + // `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + Status FineTuningJobStatus `json:"status,required"` + // The total number of billable tokens processed by this fine-tuning job. The value + // will be null if the fine-tuning job is still running. + TrainedTokens int64 `json:"trained_tokens,required,nullable"` + // The file ID used for training. You can retrieve the training data with the + // [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + TrainingFile string `json:"training_file,required"` + // The file ID used for validation. You can retrieve the validation results with + // the + // [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + ValidationFile string `json:"validation_file,required,nullable"` + // The Unix timestamp (in seconds) for when the fine-tuning job is estimated to + // finish. The value will be null if the fine-tuning job is not running. + EstimatedFinish int64 `json:"estimated_finish,nullable"` + // A list of integrations to enable for this fine-tuning job. 
+ Integrations []FineTuningJobWandbIntegrationObject `json:"integrations,nullable"` + JSON fineTuningJobJSON `json:"-"` +} + +// fineTuningJobJSON contains the JSON metadata for the struct [FineTuningJob] +type fineTuningJobJSON struct { + ID apijson.Field + CreatedAt apijson.Field + Error apijson.Field + FineTunedModel apijson.Field + FinishedAt apijson.Field + Hyperparameters apijson.Field + Model apijson.Field + Object apijson.Field + OrganizationID apijson.Field + ResultFiles apijson.Field + Seed apijson.Field + Status apijson.Field + TrainedTokens apijson.Field + TrainingFile apijson.Field + ValidationFile apijson.Field + EstimatedFinish apijson.Field + Integrations apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FineTuningJob) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r fineTuningJobJSON) RawJSON() string { + return r.raw +} + +// For fine-tuning jobs that have `failed`, this will contain more information on +// the cause of the failure. +type FineTuningJobError struct { + // A machine-readable error code. + Code string `json:"code,required"` + // A human-readable error message. + Message string `json:"message,required"` + // The parameter that was invalid, usually `training_file` or `validation_file`. + // This field will be null if the failure was not parameter-specific. + Param string `json:"param,required,nullable"` + JSON fineTuningJobErrorJSON `json:"-"` +} + +// fineTuningJobErrorJSON contains the JSON metadata for the struct +// [FineTuningJobError] +type fineTuningJobErrorJSON struct { + Code apijson.Field + Message apijson.Field + Param apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FineTuningJobError) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r fineTuningJobErrorJSON) RawJSON() string { + return r.raw +} + +// The hyperparameters used for the fine-tuning job. 
See the +// [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for +// more details. +type FineTuningJobHyperparameters struct { + // The number of epochs to train the model for. An epoch refers to one full cycle + // through the training dataset. "auto" decides the optimal number of epochs based + // on the size of the dataset. If setting the number manually, we support any + // number between 1 and 50 epochs. + NEpochs FineTuningJobHyperparametersNEpochsUnion `json:"n_epochs,required"` + JSON fineTuningJobHyperparametersJSON `json:"-"` +} + +// fineTuningJobHyperparametersJSON contains the JSON metadata for the struct +// [FineTuningJobHyperparameters] +type fineTuningJobHyperparametersJSON struct { + NEpochs apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FineTuningJobHyperparameters) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r fineTuningJobHyperparametersJSON) RawJSON() string { + return r.raw +} + +// The number of epochs to train the model for. An epoch refers to one full cycle +// through the training dataset. "auto" decides the optimal number of epochs based +// on the size of the dataset. If setting the number manually, we support any +// number between 1 and 50 epochs. +// +// Union satisfied by [FineTuningJobHyperparametersNEpochsString] or +// [shared.UnionInt]. 
+type FineTuningJobHyperparametersNEpochsUnion interface {
+	ImplementsFineTuningJobHyperparametersNEpochsUnion()
+}
+
+// init registers the union's variants with apijson so that decoding picks the
+// concrete type by the incoming JSON kind: a JSON string becomes
+// FineTuningJobHyperparametersNEpochsString, a JSON number becomes
+// shared.UnionInt.
+func init() {
+	apijson.RegisterUnion(
+		reflect.TypeOf((*FineTuningJobHyperparametersNEpochsUnion)(nil)).Elem(),
+		"",
+		apijson.UnionVariant{
+			TypeFilter: gjson.String,
+			Type:       reflect.TypeOf(FineTuningJobHyperparametersNEpochsString("")),
+		},
+		apijson.UnionVariant{
+			TypeFilter: gjson.Number,
+			Type:       reflect.TypeOf(shared.UnionInt(0)),
+		},
+	)
+}
+
+type FineTuningJobHyperparametersNEpochsString string
+
+const (
+	FineTuningJobHyperparametersNEpochsStringAuto FineTuningJobHyperparametersNEpochsString = "auto"
+)
+
+// IsKnown reports whether r is one of the enum values declared above.
+func (r FineTuningJobHyperparametersNEpochsString) IsKnown() bool {
+	switch r {
+	case FineTuningJobHyperparametersNEpochsStringAuto:
+		return true
+	}
+	return false
+}
+
+// Marker method satisfying the n_epochs union interface.
+func (r FineTuningJobHyperparametersNEpochsString) ImplementsFineTuningJobHyperparametersNEpochsUnion() {
+}
+
+// The object type, which is always "fine_tuning.job".
+type FineTuningJobObject string
+
+const (
+	FineTuningJobObjectFineTuningJob FineTuningJobObject = "fine_tuning.job"
+)
+
+// IsKnown reports whether r is one of the enum values declared above.
+func (r FineTuningJobObject) IsKnown() bool {
+	switch r {
+	case FineTuningJobObjectFineTuningJob:
+		return true
+	}
+	return false
+}
+
+// The current status of the fine-tuning job, which can be either
+// `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.
+type FineTuningJobStatus string
+
+const (
+	FineTuningJobStatusValidatingFiles FineTuningJobStatus = "validating_files"
+	FineTuningJobStatusQueued          FineTuningJobStatus = "queued"
+	FineTuningJobStatusRunning         FineTuningJobStatus = "running"
+	FineTuningJobStatusSucceeded       FineTuningJobStatus = "succeeded"
+	FineTuningJobStatusFailed          FineTuningJobStatus = "failed"
+	FineTuningJobStatusCancelled       FineTuningJobStatus = "cancelled"
+)
+
+// IsKnown reports whether r is one of the enum values declared above.
+func (r FineTuningJobStatus) IsKnown() bool {
+	switch r {
+	case FineTuningJobStatusValidatingFiles, FineTuningJobStatusQueued, FineTuningJobStatusRunning, FineTuningJobStatusSucceeded, FineTuningJobStatusFailed, FineTuningJobStatusCancelled:
+		return true
+	}
+	return false
+}
+
+// Fine-tuning job event object
+type FineTuningJobEvent struct {
+	// The event identifier (JSON field "id").
+	ID string `json:"id,required"`
+	// Unix timestamp (JSON field "created_at"); presumably seconds — the
+	// sibling FineTuningJob documents its timestamps in seconds.
+	CreatedAt int64 `json:"created_at,required"`
+	// Severity of the event: info, warn, or error.
+	Level FineTuningJobEventLevel `json:"level,required"`
+	// Human-readable event message.
+	Message string `json:"message,required"`
+	// Always "fine_tuning.job.event".
+	Object FineTuningJobEventObject `json:"object,required"`
+	// Per-field decode metadata; not part of the wire format.
+	JSON fineTuningJobEventJSON `json:"-"`
+}
+
+// fineTuningJobEventJSON contains the JSON metadata for the struct
+// [FineTuningJobEvent]
+type fineTuningJobEventJSON struct {
+	ID          apijson.Field
+	CreatedAt   apijson.Field
+	Level       apijson.Field
+	Message     apijson.Field
+	Object      apijson.Field
+	raw         string
+	ExtraFields map[string]apijson.Field
+}
+
+func (r *FineTuningJobEvent) UnmarshalJSON(data []byte) (err error) {
+	return apijson.UnmarshalRoot(data, r)
+}
+
+// RawJSON returns the original JSON the struct was decoded from.
+func (r fineTuningJobEventJSON) RawJSON() string {
+	return r.raw
+}
+
+// The severity level of a fine-tuning job event.
+type FineTuningJobEventLevel string
+
+const (
+	FineTuningJobEventLevelInfo  FineTuningJobEventLevel = "info"
+	FineTuningJobEventLevelWarn  FineTuningJobEventLevel = "warn"
+	FineTuningJobEventLevelError FineTuningJobEventLevel = "error"
+)
+
+// IsKnown reports whether r is one of the enum values declared above.
+func (r FineTuningJobEventLevel) IsKnown() bool {
+	switch r {
+	case FineTuningJobEventLevelInfo, FineTuningJobEventLevelWarn, FineTuningJobEventLevelError:
+		return true
+	}
+	return false
+}
+
+// The object type of a fine-tuning job event.
+type FineTuningJobEventObject string
+
+const (
+	FineTuningJobEventObjectFineTuningJobEvent FineTuningJobEventObject = "fine_tuning.job.event"
+)
+
+// IsKnown reports whether r is one of the enum values declared above.
+func (r FineTuningJobEventObject) IsKnown() bool {
+	switch r {
+	case FineTuningJobEventObjectFineTuningJobEvent:
+		return true
+	}
+	return false
+}
+
+// A Weights and Biases integration attached to a fine-tuning job, pairing the
+// integration type discriminator with its settings payload.
+type FineTuningJobWandbIntegrationObject struct {
+	// The type of the integration being enabled for the fine-tuning job
+	Type FineTuningJobWandbIntegrationObjectType `json:"type,required"`
+	// The settings for your integration with Weights and Biases. This payload
+	// specifies the project that metrics will be sent to. Optionally, you can set an
+	// explicit display name for your run, add tags to your run, and set a default
+	// entity (team, username, etc) to be associated with your run.
+	Wandb FineTuningJobWandbIntegration `json:"wandb,required"`
+	// Per-field decode metadata; not part of the wire format.
+	JSON fineTuningJobWandbIntegrationObjectJSON `json:"-"`
+}
+
+// fineTuningJobWandbIntegrationObjectJSON contains the JSON metadata for the
+// struct [FineTuningJobWandbIntegrationObject]
+type fineTuningJobWandbIntegrationObjectJSON struct {
+	Type        apijson.Field
+	Wandb       apijson.Field
+	raw         string
+	ExtraFields map[string]apijson.Field
+}
+
+func (r *FineTuningJobWandbIntegrationObject) UnmarshalJSON(data []byte) (err error) {
+	return apijson.UnmarshalRoot(data, r)
+}
+
+// RawJSON returns the original JSON the struct was decoded from.
+func (r fineTuningJobWandbIntegrationObjectJSON) RawJSON() string {
+	return r.raw
+}
+
+// The type of the integration being enabled for the fine-tuning job
+type FineTuningJobWandbIntegrationObjectType string
+
+const (
+	FineTuningJobWandbIntegrationObjectTypeWandb FineTuningJobWandbIntegrationObjectType = "wandb"
+)
+
+// IsKnown reports whether r is one of the enum values declared above.
+func (r FineTuningJobWandbIntegrationObjectType) IsKnown() bool {
+	switch r {
+	case FineTuningJobWandbIntegrationObjectTypeWandb:
+		return true
+	}
+	return false
+}
+
+// The settings for your integration with Weights and Biases. This payload
+// specifies the project that metrics will be sent to.
Optionally, you can set an +// explicit display name for your run, add tags to your run, and set a default +// entity (team, username, etc) to be associated with your run. +type FineTuningJobWandbIntegration struct { + // The name of the project that the new run will be created under. + Project string `json:"project,required"` + // The entity to use for the run. This allows you to set the team or username of + // the WandB user that you would like associated with the run. If not set, the + // default entity for the registered WandB API key is used. + Entity string `json:"entity,nullable"` + // A display name to set for the run. If not set, we will use the Job ID as the + // name. + Name string `json:"name,nullable"` + // A list of tags to be attached to the newly created run. These tags are passed + // through directly to WandB. Some default tags are generated by OpenAI: + // "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + Tags []string `json:"tags"` + JSON fineTuningJobWandbIntegrationJSON `json:"-"` +} + +// fineTuningJobWandbIntegrationJSON contains the JSON metadata for the struct +// [FineTuningJobWandbIntegration] +type fineTuningJobWandbIntegrationJSON struct { + Project apijson.Field + Entity apijson.Field + Name apijson.Field + Tags apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FineTuningJobWandbIntegration) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r fineTuningJobWandbIntegrationJSON) RawJSON() string { + return r.raw +} + +type FineTuningJobNewParams struct { + // The name of the model to fine-tune. You can select one of the + // [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + Model param.Field[FineTuningJobNewParamsModel] `json:"model,required"` + // The ID of an uploaded file that contains training data. 
+ // + // See [upload file](https://platform.openai.com/docs/api-reference/files/create) + // for how to upload a file. + // + // Your dataset must be formatted as a JSONL file. Additionally, you must upload + // your file with the purpose `fine-tune`. + // + // The contents of the file should differ depending on if the model uses the + // [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + // [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + // format. + // + // See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + // for more details. + TrainingFile param.Field[string] `json:"training_file,required"` + // The hyperparameters used for the fine-tuning job. + Hyperparameters param.Field[FineTuningJobNewParamsHyperparameters] `json:"hyperparameters"` + // A list of integrations to enable for your fine-tuning job. + Integrations param.Field[[]FineTuningJobNewParamsIntegration] `json:"integrations"` + // The seed controls the reproducibility of the job. Passing in the same seed and + // job parameters should produce the same results, but may differ in rare cases. If + // a seed is not specified, one will be generated for you. + Seed param.Field[int64] `json:"seed"` + // A string of up to 18 characters that will be added to your fine-tuned model + // name. + // + // For example, a `suffix` of "custom-model-name" would produce a model name like + // `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + Suffix param.Field[string] `json:"suffix"` + // The ID of an uploaded file that contains validation data. + // + // If you provide this file, the data is used to generate validation metrics + // periodically during fine-tuning. These metrics can be viewed in the fine-tuning + // results file. The same data should not be present in both train and validation + // files. + // + // Your dataset must be formatted as a JSONL file. 
You must upload your file with + // the purpose `fine-tune`. + // + // See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + // for more details. + ValidationFile param.Field[string] `json:"validation_file"` +} + +func (r FineTuningJobNewParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type FineTuningJobNewParamsModel string + +const ( + FineTuningJobNewParamsModelBabbage002 FineTuningJobNewParamsModel = "babbage-002" + FineTuningJobNewParamsModelDavinci002 FineTuningJobNewParamsModel = "davinci-002" + FineTuningJobNewParamsModelGPT3_5Turbo FineTuningJobNewParamsModel = "gpt-3.5-turbo" +) + +func (r FineTuningJobNewParamsModel) IsKnown() bool { + switch r { + case FineTuningJobNewParamsModelBabbage002, FineTuningJobNewParamsModelDavinci002, FineTuningJobNewParamsModelGPT3_5Turbo: + return true + } + return false +} + +// The hyperparameters used for the fine-tuning job. +type FineTuningJobNewParamsHyperparameters struct { + // Number of examples in each batch. A larger batch size means that model + // parameters are updated less frequently, but with lower variance. + BatchSize param.Field[FineTuningJobNewParamsHyperparametersBatchSizeUnion] `json:"batch_size"` + // Scaling factor for the learning rate. A smaller learning rate may be useful to + // avoid overfitting. + LearningRateMultiplier param.Field[FineTuningJobNewParamsHyperparametersLearningRateMultiplierUnion] `json:"learning_rate_multiplier"` + // The number of epochs to train the model for. An epoch refers to one full cycle + // through the training dataset. + NEpochs param.Field[FineTuningJobNewParamsHyperparametersNEpochsUnion] `json:"n_epochs"` +} + +func (r FineTuningJobNewParamsHyperparameters) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// Number of examples in each batch. A larger batch size means that model +// parameters are updated less frequently, but with lower variance. 
+// +// Satisfied by [FineTuningJobNewParamsHyperparametersBatchSizeString], +// [shared.UnionInt]. +type FineTuningJobNewParamsHyperparametersBatchSizeUnion interface { + ImplementsFineTuningJobNewParamsHyperparametersBatchSizeUnion() +} + +type FineTuningJobNewParamsHyperparametersBatchSizeString string + +const ( + FineTuningJobNewParamsHyperparametersBatchSizeStringAuto FineTuningJobNewParamsHyperparametersBatchSizeString = "auto" +) + +func (r FineTuningJobNewParamsHyperparametersBatchSizeString) IsKnown() bool { + switch r { + case FineTuningJobNewParamsHyperparametersBatchSizeStringAuto: + return true + } + return false +} + +func (r FineTuningJobNewParamsHyperparametersBatchSizeString) ImplementsFineTuningJobNewParamsHyperparametersBatchSizeUnion() { +} + +// Scaling factor for the learning rate. A smaller learning rate may be useful to +// avoid overfitting. +// +// Satisfied by +// [FineTuningJobNewParamsHyperparametersLearningRateMultiplierString], +// [shared.UnionFloat]. +type FineTuningJobNewParamsHyperparametersLearningRateMultiplierUnion interface { + ImplementsFineTuningJobNewParamsHyperparametersLearningRateMultiplierUnion() +} + +type FineTuningJobNewParamsHyperparametersLearningRateMultiplierString string + +const ( + FineTuningJobNewParamsHyperparametersLearningRateMultiplierStringAuto FineTuningJobNewParamsHyperparametersLearningRateMultiplierString = "auto" +) + +func (r FineTuningJobNewParamsHyperparametersLearningRateMultiplierString) IsKnown() bool { + switch r { + case FineTuningJobNewParamsHyperparametersLearningRateMultiplierStringAuto: + return true + } + return false +} + +func (r FineTuningJobNewParamsHyperparametersLearningRateMultiplierString) ImplementsFineTuningJobNewParamsHyperparametersLearningRateMultiplierUnion() { +} + +// The number of epochs to train the model for. An epoch refers to one full cycle +// through the training dataset. 
+// +// Satisfied by [FineTuningJobNewParamsHyperparametersNEpochsString], +// [shared.UnionInt]. +type FineTuningJobNewParamsHyperparametersNEpochsUnion interface { + ImplementsFineTuningJobNewParamsHyperparametersNEpochsUnion() +} + +type FineTuningJobNewParamsHyperparametersNEpochsString string + +const ( + FineTuningJobNewParamsHyperparametersNEpochsStringAuto FineTuningJobNewParamsHyperparametersNEpochsString = "auto" +) + +func (r FineTuningJobNewParamsHyperparametersNEpochsString) IsKnown() bool { + switch r { + case FineTuningJobNewParamsHyperparametersNEpochsStringAuto: + return true + } + return false +} + +func (r FineTuningJobNewParamsHyperparametersNEpochsString) ImplementsFineTuningJobNewParamsHyperparametersNEpochsUnion() { +} + +type FineTuningJobNewParamsIntegration struct { + // The type of integration to enable. Currently, only "wandb" (Weights and Biases) + // is supported. + Type param.Field[FineTuningJobNewParamsIntegrationsType] `json:"type,required"` + // The settings for your integration with Weights and Biases. This payload + // specifies the project that metrics will be sent to. Optionally, you can set an + // explicit display name for your run, add tags to your run, and set a default + // entity (team, username, etc) to be associated with your run. + Wandb param.Field[FineTuningJobNewParamsIntegrationsWandb] `json:"wandb,required"` +} + +func (r FineTuningJobNewParamsIntegration) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// The type of integration to enable. Currently, only "wandb" (Weights and Biases) +// is supported. 
+type FineTuningJobNewParamsIntegrationsType string + +const ( + FineTuningJobNewParamsIntegrationsTypeWandb FineTuningJobNewParamsIntegrationsType = "wandb" +) + +func (r FineTuningJobNewParamsIntegrationsType) IsKnown() bool { + switch r { + case FineTuningJobNewParamsIntegrationsTypeWandb: + return true + } + return false +} + +// The settings for your integration with Weights and Biases. This payload +// specifies the project that metrics will be sent to. Optionally, you can set an +// explicit display name for your run, add tags to your run, and set a default +// entity (team, username, etc) to be associated with your run. +type FineTuningJobNewParamsIntegrationsWandb struct { + // The name of the project that the new run will be created under. + Project param.Field[string] `json:"project,required"` + // The entity to use for the run. This allows you to set the team or username of + // the WandB user that you would like associated with the run. If not set, the + // default entity for the registered WandB API key is used. + Entity param.Field[string] `json:"entity"` + // A display name to set for the run. If not set, we will use the Job ID as the + // name. + Name param.Field[string] `json:"name"` + // A list of tags to be attached to the newly created run. These tags are passed + // through directly to WandB. Some default tags are generated by OpenAI: + // "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + Tags param.Field[[]string] `json:"tags"` +} + +func (r FineTuningJobNewParamsIntegrationsWandb) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type FineTuningJobListParams struct { + // Identifier for the last job from the previous pagination request. + After param.Field[string] `query:"after"` + // Number of fine-tuning jobs to retrieve. + Limit param.Field[int64] `query:"limit"` +} + +// URLQuery serializes [FineTuningJobListParams]'s query parameters as +// `url.Values`. 
+func (r FineTuningJobListParams) URLQuery() (v url.Values) { + return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ + ArrayFormat: apiquery.ArrayQueryFormatComma, + NestedFormat: apiquery.NestedQueryFormatBrackets, + }) +} + +type FineTuningJobListEventsParams struct { + // Identifier for the last event from the previous pagination request. + After param.Field[string] `query:"after"` + // Number of events to retrieve. + Limit param.Field[int64] `query:"limit"` +} + +// URLQuery serializes [FineTuningJobListEventsParams]'s query parameters as +// `url.Values`. +func (r FineTuningJobListEventsParams) URLQuery() (v url.Values) { + return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ + ArrayFormat: apiquery.ArrayQueryFormatComma, + NestedFormat: apiquery.NestedQueryFormatBrackets, + }) +} diff --git a/finetuningjob_test.go b/finetuningjob_test.go new file mode 100644 index 0000000..75e8c8a --- /dev/null +++ b/finetuningjob_test.go @@ -0,0 +1,170 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package openai_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" +) + +func TestFineTuningJobNewWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.FineTuning.Jobs.New(context.TODO(), openai.FineTuningJobNewParams{ + Model: openai.F(openai.FineTuningJobNewParamsModelBabbage002), + TrainingFile: openai.F("file-abc123"), + Hyperparameters: openai.F(openai.FineTuningJobNewParamsHyperparameters{ + BatchSize: openai.F[openai.FineTuningJobNewParamsHyperparametersBatchSizeUnion](openai.FineTuningJobNewParamsHyperparametersBatchSizeString(openai.FineTuningJobNewParamsHyperparametersBatchSizeStringAuto)), + LearningRateMultiplier: openai.F[openai.FineTuningJobNewParamsHyperparametersLearningRateMultiplierUnion](openai.FineTuningJobNewParamsHyperparametersLearningRateMultiplierString(openai.FineTuningJobNewParamsHyperparametersLearningRateMultiplierStringAuto)), + NEpochs: openai.F[openai.FineTuningJobNewParamsHyperparametersNEpochsUnion](openai.FineTuningJobNewParamsHyperparametersNEpochsString(openai.FineTuningJobNewParamsHyperparametersNEpochsStringAuto)), + }), + Integrations: openai.F([]openai.FineTuningJobNewParamsIntegration{{ + Type: openai.F(openai.FineTuningJobNewParamsIntegrationsTypeWandb), + Wandb: openai.F(openai.FineTuningJobNewParamsIntegrationsWandb{ + Project: openai.F("my-wandb-project"), + Name: openai.F("name"), + Entity: openai.F("entity"), + Tags: openai.F([]string{"custom-tag", "custom-tag", "custom-tag"}), + }), + }, { + Type: openai.F(openai.FineTuningJobNewParamsIntegrationsTypeWandb), + Wandb: 
openai.F(openai.FineTuningJobNewParamsIntegrationsWandb{ + Project: openai.F("my-wandb-project"), + Name: openai.F("name"), + Entity: openai.F("entity"), + Tags: openai.F([]string{"custom-tag", "custom-tag", "custom-tag"}), + }), + }, { + Type: openai.F(openai.FineTuningJobNewParamsIntegrationsTypeWandb), + Wandb: openai.F(openai.FineTuningJobNewParamsIntegrationsWandb{ + Project: openai.F("my-wandb-project"), + Name: openai.F("name"), + Entity: openai.F("entity"), + Tags: openai.F([]string{"custom-tag", "custom-tag", "custom-tag"}), + }), + }}), + Seed: openai.F(int64(42)), + Suffix: openai.F("x"), + ValidationFile: openai.F("file-abc123"), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestFineTuningJobGet(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.FineTuning.Jobs.Get(context.TODO(), "ft-AF1WoRqd3aJAHsqc9NY7iL8F") + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestFineTuningJobListWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.FineTuning.Jobs.List(context.TODO(), openai.FineTuningJobListParams{ + After: openai.F("after"), + Limit: openai.F(int64(0)), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + 
t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestFineTuningJobCancel(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.FineTuning.Jobs.Cancel(context.TODO(), "ft-AF1WoRqd3aJAHsqc9NY7iL8F") + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestFineTuningJobListEventsWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.FineTuning.Jobs.ListEvents( + context.TODO(), + "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + openai.FineTuningJobListEventsParams{ + After: openai.F("after"), + Limit: openai.F(int64(0)), + }, + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/finetuningjobcheckpoint.go b/finetuningjobcheckpoint.go new file mode 100644 index 0000000..6279e24 --- /dev/null +++ b/finetuningjobcheckpoint.go @@ -0,0 +1,171 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package openai + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + + "github.com/openai/openai-go/internal/apijson" + "github.com/openai/openai-go/internal/apiquery" + "github.com/openai/openai-go/internal/pagination" + "github.com/openai/openai-go/internal/param" + "github.com/openai/openai-go/internal/requestconfig" + "github.com/openai/openai-go/option" +) + +// FineTuningJobCheckpointService contains methods and other services that help +// with interacting with the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewFineTuningJobCheckpointService] method instead. +type FineTuningJobCheckpointService struct { + Options []option.RequestOption +} + +// NewFineTuningJobCheckpointService generates a new service that applies the given +// options to each request. These options are applied after the parent client's +// options (if there is one), and before any request-specific options. +func NewFineTuningJobCheckpointService(opts ...option.RequestOption) (r *FineTuningJobCheckpointService) { + r = &FineTuningJobCheckpointService{} + r.Options = opts + return +} + +// List checkpoints for a fine-tuning job. +func (r *FineTuningJobCheckpointService) List(ctx context.Context, fineTuningJobID string, query FineTuningJobCheckpointListParams, opts ...option.RequestOption) (res *pagination.CursorPage[FineTuningJobCheckpoint], err error) { + var raw *http.Response + opts = append(r.Options[:], opts...) + opts = append([]option.RequestOption{option.WithResponseInto(&raw)}, opts...) + if fineTuningJobID == "" { + err = errors.New("missing required fine_tuning_job_id parameter") + return + } + path := fmt.Sprintf("fine_tuning/jobs/%s/checkpoints", fineTuningJobID) + cfg, err := requestconfig.NewRequestConfig(ctx, http.MethodGet, path, query, &res, opts...) 
+ if err != nil { + return nil, err + } + err = cfg.Execute() + if err != nil { + return nil, err + } + res.SetPageConfig(cfg, raw) + return res, nil +} + +// List checkpoints for a fine-tuning job. +func (r *FineTuningJobCheckpointService) ListAutoPaging(ctx context.Context, fineTuningJobID string, query FineTuningJobCheckpointListParams, opts ...option.RequestOption) *pagination.CursorPageAutoPager[FineTuningJobCheckpoint] { + return pagination.NewCursorPageAutoPager(r.List(ctx, fineTuningJobID, query, opts...)) +} + +// The `fine_tuning.job.checkpoint` object represents a model checkpoint for a +// fine-tuning job that is ready to use. +type FineTuningJobCheckpoint struct { + // The checkpoint identifier, which can be referenced in the API endpoints. + ID string `json:"id,required"` + // The Unix timestamp (in seconds) for when the checkpoint was created. + CreatedAt int64 `json:"created_at,required"` + // The name of the fine-tuned checkpoint model that is created. + FineTunedModelCheckpoint string `json:"fine_tuned_model_checkpoint,required"` + // The name of the fine-tuning job that this checkpoint was created from. + FineTuningJobID string `json:"fine_tuning_job_id,required"` + // Metrics at the step number during the fine-tuning job. + Metrics FineTuningJobCheckpointMetrics `json:"metrics,required"` + // The object type, which is always "fine_tuning.job.checkpoint". + Object FineTuningJobCheckpointObject `json:"object,required"` + // The step number that the checkpoint was created at. 
+ StepNumber int64 `json:"step_number,required"` + JSON fineTuningJobCheckpointJSON `json:"-"` +} + +// fineTuningJobCheckpointJSON contains the JSON metadata for the struct +// [FineTuningJobCheckpoint] +type fineTuningJobCheckpointJSON struct { + ID apijson.Field + CreatedAt apijson.Field + FineTunedModelCheckpoint apijson.Field + FineTuningJobID apijson.Field + Metrics apijson.Field + Object apijson.Field + StepNumber apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FineTuningJobCheckpoint) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r fineTuningJobCheckpointJSON) RawJSON() string { + return r.raw +} + +// Metrics at the step number during the fine-tuning job. +type FineTuningJobCheckpointMetrics struct { + FullValidLoss float64 `json:"full_valid_loss"` + FullValidMeanTokenAccuracy float64 `json:"full_valid_mean_token_accuracy"` + Step float64 `json:"step"` + TrainLoss float64 `json:"train_loss"` + TrainMeanTokenAccuracy float64 `json:"train_mean_token_accuracy"` + ValidLoss float64 `json:"valid_loss"` + ValidMeanTokenAccuracy float64 `json:"valid_mean_token_accuracy"` + JSON fineTuningJobCheckpointMetricsJSON `json:"-"` +} + +// fineTuningJobCheckpointMetricsJSON contains the JSON metadata for the struct +// [FineTuningJobCheckpointMetrics] +type fineTuningJobCheckpointMetricsJSON struct { + FullValidLoss apijson.Field + FullValidMeanTokenAccuracy apijson.Field + Step apijson.Field + TrainLoss apijson.Field + TrainMeanTokenAccuracy apijson.Field + ValidLoss apijson.Field + ValidMeanTokenAccuracy apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FineTuningJobCheckpointMetrics) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r fineTuningJobCheckpointMetricsJSON) RawJSON() string { + return r.raw +} + +// The object type, which is always "fine_tuning.job.checkpoint". 
+type FineTuningJobCheckpointObject string + +const ( + FineTuningJobCheckpointObjectFineTuningJobCheckpoint FineTuningJobCheckpointObject = "fine_tuning.job.checkpoint" +) + +func (r FineTuningJobCheckpointObject) IsKnown() bool { + switch r { + case FineTuningJobCheckpointObjectFineTuningJobCheckpoint: + return true + } + return false +} + +type FineTuningJobCheckpointListParams struct { + // Identifier for the last checkpoint ID from the previous pagination request. + After param.Field[string] `query:"after"` + // Number of checkpoints to retrieve. + Limit param.Field[int64] `query:"limit"` +} + +// URLQuery serializes [FineTuningJobCheckpointListParams]'s query parameters as +// `url.Values`. +func (r FineTuningJobCheckpointListParams) URLQuery() (v url.Values) { + return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ + ArrayFormat: apiquery.ArrayQueryFormatComma, + NestedFormat: apiquery.NestedQueryFormatBrackets, + }) +} diff --git a/finetuningjobcheckpoint_test.go b/finetuningjobcheckpoint_test.go new file mode 100644 index 0000000..6f6cafc --- /dev/null +++ b/finetuningjobcheckpoint_test.go @@ -0,0 +1,43 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package openai_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" +) + +func TestFineTuningJobCheckpointListWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.FineTuning.Jobs.Checkpoints.List( + context.TODO(), + "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + openai.FineTuningJobCheckpointListParams{ + After: openai.F("after"), + Limit: openai.F(int64(0)), + }, + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..1e064e6 --- /dev/null +++ b/go.mod @@ -0,0 +1,11 @@ +module github.com/openai/openai-go + +go 1.19 + +require ( + github.com/google/uuid v1.3.0 // indirect + github.com/tidwall/gjson v1.14.4 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.1 // indirect + github.com/tidwall/sjson v1.2.5 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..569e555 --- /dev/null +++ b/go.sum @@ -0,0 +1,12 @@ +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= +github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= 
+github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= diff --git a/image.go b/image.go new file mode 100644 index 0000000..89af593 --- /dev/null +++ b/image.go @@ -0,0 +1,421 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai + +import ( + "bytes" + "context" + "io" + "mime/multipart" + "net/http" + + "github.com/openai/openai-go/internal/apiform" + "github.com/openai/openai-go/internal/apijson" + "github.com/openai/openai-go/internal/param" + "github.com/openai/openai-go/internal/requestconfig" + "github.com/openai/openai-go/option" +) + +// ImageService contains methods and other services that help with interacting with +// the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewImageService] method instead. +type ImageService struct { + Options []option.RequestOption +} + +// NewImageService generates a new service that applies the given options to each +// request. These options are applied after the parent client's options (if there +// is one), and before any request-specific options. +func NewImageService(opts ...option.RequestOption) (r *ImageService) { + r = &ImageService{} + r.Options = opts + return +} + +// Creates a variation of a given image. 
+func (r *ImageService) NewVariation(ctx context.Context, body ImageNewVariationParams, opts ...option.RequestOption) (res *ImagesResponse, err error) { + opts = append(r.Options[:], opts...) + path := "images/variations" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Creates an edited or extended image given an original image and a prompt. +func (r *ImageService) Edit(ctx context.Context, body ImageEditParams, opts ...option.RequestOption) (res *ImagesResponse, err error) { + opts = append(r.Options[:], opts...) + path := "images/edits" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Creates an image given a prompt. +func (r *ImageService) Generate(ctx context.Context, body ImageGenerateParams, opts ...option.RequestOption) (res *ImagesResponse, err error) { + opts = append(r.Options[:], opts...) + path := "images/generations" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Represents the url or the content of an image generated by the OpenAI API. +type Image struct { + // The base64-encoded JSON of the generated image, if `response_format` is + // `b64_json`. + B64JSON string `json:"b64_json"` + // The prompt that was used to generate the image, if there was any revision to the + // prompt. + RevisedPrompt string `json:"revised_prompt"` + // The URL of the generated image, if `response_format` is `url` (default). 
+ URL string `json:"url"` + JSON imageJSON `json:"-"` +} + +// imageJSON contains the JSON metadata for the struct [Image] +type imageJSON struct { + B64JSON apijson.Field + RevisedPrompt apijson.Field + URL apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *Image) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r imageJSON) RawJSON() string { + return r.raw +} + +type ImagesResponse struct { + Created int64 `json:"created,required"` + Data []Image `json:"data,required"` + JSON imagesResponseJSON `json:"-"` +} + +// imagesResponseJSON contains the JSON metadata for the struct [ImagesResponse] +type imagesResponseJSON struct { + Created apijson.Field + Data apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ImagesResponse) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r imagesResponseJSON) RawJSON() string { + return r.raw +} + +type ImageNewVariationParams struct { + // The image to use as the basis for the variation(s). Must be a valid PNG file, + // less than 4MB, and square. + Image param.Field[io.Reader] `json:"image,required" format:"binary"` + // The model to use for image generation. Only `dall-e-2` is supported at this + // time. + Model param.Field[ImageNewVariationParamsModel] `json:"model"` + // The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + // `n=1` is supported. + N param.Field[int64] `json:"n"` + // The format in which the generated images are returned. Must be one of `url` or + // `b64_json`. URLs are only valid for 60 minutes after the image has been + // generated. + ResponseFormat param.Field[ImageNewVariationParamsResponseFormat] `json:"response_format"` + // The size of the generated images. Must be one of `256x256`, `512x512`, or + // `1024x1024`. 
+ Size param.Field[ImageNewVariationParamsSize] `json:"size"` + // A unique identifier representing your end-user, which can help OpenAI to monitor + // and detect abuse. + // [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + User param.Field[string] `json:"user"` +} + +func (r ImageNewVariationParams) MarshalMultipart() (data []byte, contentType string, err error) { + buf := bytes.NewBuffer(nil) + writer := multipart.NewWriter(buf) + err = apiform.MarshalRoot(r, writer) + if err != nil { + writer.Close() + return nil, "", err + } + err = writer.Close() + if err != nil { + return nil, "", err + } + return buf.Bytes(), writer.FormDataContentType(), nil +} + +type ImageNewVariationParamsModel string + +const ( + ImageNewVariationParamsModelDallE2 ImageNewVariationParamsModel = "dall-e-2" +) + +func (r ImageNewVariationParamsModel) IsKnown() bool { + switch r { + case ImageNewVariationParamsModelDallE2: + return true + } + return false +} + +// The format in which the generated images are returned. Must be one of `url` or +// `b64_json`. URLs are only valid for 60 minutes after the image has been +// generated. +type ImageNewVariationParamsResponseFormat string + +const ( + ImageNewVariationParamsResponseFormatURL ImageNewVariationParamsResponseFormat = "url" + ImageNewVariationParamsResponseFormatB64JSON ImageNewVariationParamsResponseFormat = "b64_json" +) + +func (r ImageNewVariationParamsResponseFormat) IsKnown() bool { + switch r { + case ImageNewVariationParamsResponseFormatURL, ImageNewVariationParamsResponseFormatB64JSON: + return true + } + return false +} + +// The size of the generated images. Must be one of `256x256`, `512x512`, or +// `1024x1024`. 
+type ImageNewVariationParamsSize string + +const ( + ImageNewVariationParamsSize256x256 ImageNewVariationParamsSize = "256x256" + ImageNewVariationParamsSize512x512 ImageNewVariationParamsSize = "512x512" + ImageNewVariationParamsSize1024x1024 ImageNewVariationParamsSize = "1024x1024" +) + +func (r ImageNewVariationParamsSize) IsKnown() bool { + switch r { + case ImageNewVariationParamsSize256x256, ImageNewVariationParamsSize512x512, ImageNewVariationParamsSize1024x1024: + return true + } + return false +} + +type ImageEditParams struct { + // The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask + // is not provided, image must have transparency, which will be used as the mask. + Image param.Field[io.Reader] `json:"image,required" format:"binary"` + // A text description of the desired image(s). The maximum length is 1000 + // characters. + Prompt param.Field[string] `json:"prompt,required"` + // An additional image whose fully transparent areas (e.g. where alpha is zero) + // indicate where `image` should be edited. Must be a valid PNG file, less than + // 4MB, and have the same dimensions as `image`. + Mask param.Field[io.Reader] `json:"mask" format:"binary"` + // The model to use for image generation. Only `dall-e-2` is supported at this + // time. + Model param.Field[ImageEditParamsModel] `json:"model"` + // The number of images to generate. Must be between 1 and 10. + N param.Field[int64] `json:"n"` + // The format in which the generated images are returned. Must be one of `url` or + // `b64_json`. URLs are only valid for 60 minutes after the image has been + // generated. + ResponseFormat param.Field[ImageEditParamsResponseFormat] `json:"response_format"` + // The size of the generated images. Must be one of `256x256`, `512x512`, or + // `1024x1024`. + Size param.Field[ImageEditParamsSize] `json:"size"` + // A unique identifier representing your end-user, which can help OpenAI to monitor + // and detect abuse. 
+ // [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + User param.Field[string] `json:"user"` +} + +func (r ImageEditParams) MarshalMultipart() (data []byte, contentType string, err error) { + buf := bytes.NewBuffer(nil) + writer := multipart.NewWriter(buf) + err = apiform.MarshalRoot(r, writer) + if err != nil { + writer.Close() + return nil, "", err + } + err = writer.Close() + if err != nil { + return nil, "", err + } + return buf.Bytes(), writer.FormDataContentType(), nil +} + +type ImageEditParamsModel string + +const ( + ImageEditParamsModelDallE2 ImageEditParamsModel = "dall-e-2" +) + +func (r ImageEditParamsModel) IsKnown() bool { + switch r { + case ImageEditParamsModelDallE2: + return true + } + return false +} + +// The format in which the generated images are returned. Must be one of `url` or +// `b64_json`. URLs are only valid for 60 minutes after the image has been +// generated. +type ImageEditParamsResponseFormat string + +const ( + ImageEditParamsResponseFormatURL ImageEditParamsResponseFormat = "url" + ImageEditParamsResponseFormatB64JSON ImageEditParamsResponseFormat = "b64_json" +) + +func (r ImageEditParamsResponseFormat) IsKnown() bool { + switch r { + case ImageEditParamsResponseFormatURL, ImageEditParamsResponseFormatB64JSON: + return true + } + return false +} + +// The size of the generated images. Must be one of `256x256`, `512x512`, or +// `1024x1024`. +type ImageEditParamsSize string + +const ( + ImageEditParamsSize256x256 ImageEditParamsSize = "256x256" + ImageEditParamsSize512x512 ImageEditParamsSize = "512x512" + ImageEditParamsSize1024x1024 ImageEditParamsSize = "1024x1024" +) + +func (r ImageEditParamsSize) IsKnown() bool { + switch r { + case ImageEditParamsSize256x256, ImageEditParamsSize512x512, ImageEditParamsSize1024x1024: + return true + } + return false +} + +type ImageGenerateParams struct { + // A text description of the desired image(s). 
The maximum length is 1000 + // characters for `dall-e-2` and 4000 characters for `dall-e-3`. + Prompt param.Field[string] `json:"prompt,required"` + // The model to use for image generation. + Model param.Field[ImageGenerateParamsModel] `json:"model"` + // The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + // `n=1` is supported. + N param.Field[int64] `json:"n"` + // The quality of the image that will be generated. `hd` creates images with finer + // details and greater consistency across the image. This param is only supported + // for `dall-e-3`. + Quality param.Field[ImageGenerateParamsQuality] `json:"quality"` + // The format in which the generated images are returned. Must be one of `url` or + // `b64_json`. URLs are only valid for 60 minutes after the image has been + // generated. + ResponseFormat param.Field[ImageGenerateParamsResponseFormat] `json:"response_format"` + // The size of the generated images. Must be one of `256x256`, `512x512`, or + // `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or + // `1024x1792` for `dall-e-3` models. + Size param.Field[ImageGenerateParamsSize] `json:"size"` + // The style of the generated images. Must be one of `vivid` or `natural`. Vivid + // causes the model to lean towards generating hyper-real and dramatic images. + // Natural causes the model to produce more natural, less hyper-real looking + // images. This param is only supported for `dall-e-3`. + Style param.Field[ImageGenerateParamsStyle] `json:"style"` + // A unique identifier representing your end-user, which can help OpenAI to monitor + // and detect abuse. + // [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). 
+ User param.Field[string] `json:"user"` +} + +func (r ImageGenerateParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type ImageGenerateParamsModel string + +const ( + ImageGenerateParamsModelDallE2 ImageGenerateParamsModel = "dall-e-2" + ImageGenerateParamsModelDallE3 ImageGenerateParamsModel = "dall-e-3" +) + +func (r ImageGenerateParamsModel) IsKnown() bool { + switch r { + case ImageGenerateParamsModelDallE2, ImageGenerateParamsModelDallE3: + return true + } + return false +} + +// The quality of the image that will be generated. `hd` creates images with finer +// details and greater consistency across the image. This param is only supported +// for `dall-e-3`. +type ImageGenerateParamsQuality string + +const ( + ImageGenerateParamsQualityStandard ImageGenerateParamsQuality = "standard" + ImageGenerateParamsQualityHD ImageGenerateParamsQuality = "hd" +) + +func (r ImageGenerateParamsQuality) IsKnown() bool { + switch r { + case ImageGenerateParamsQualityStandard, ImageGenerateParamsQualityHD: + return true + } + return false +} + +// The format in which the generated images are returned. Must be one of `url` or +// `b64_json`. URLs are only valid for 60 minutes after the image has been +// generated. +type ImageGenerateParamsResponseFormat string + +const ( + ImageGenerateParamsResponseFormatURL ImageGenerateParamsResponseFormat = "url" + ImageGenerateParamsResponseFormatB64JSON ImageGenerateParamsResponseFormat = "b64_json" +) + +func (r ImageGenerateParamsResponseFormat) IsKnown() bool { + switch r { + case ImageGenerateParamsResponseFormatURL, ImageGenerateParamsResponseFormatB64JSON: + return true + } + return false +} + +// The size of the generated images. Must be one of `256x256`, `512x512`, or +// `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or +// `1024x1792` for `dall-e-3` models. 
+type ImageGenerateParamsSize string + +const ( + ImageGenerateParamsSize256x256 ImageGenerateParamsSize = "256x256" + ImageGenerateParamsSize512x512 ImageGenerateParamsSize = "512x512" + ImageGenerateParamsSize1024x1024 ImageGenerateParamsSize = "1024x1024" + ImageGenerateParamsSize1792x1024 ImageGenerateParamsSize = "1792x1024" + ImageGenerateParamsSize1024x1792 ImageGenerateParamsSize = "1024x1792" +) + +func (r ImageGenerateParamsSize) IsKnown() bool { + switch r { + case ImageGenerateParamsSize256x256, ImageGenerateParamsSize512x512, ImageGenerateParamsSize1024x1024, ImageGenerateParamsSize1792x1024, ImageGenerateParamsSize1024x1792: + return true + } + return false +} + +// The style of the generated images. Must be one of `vivid` or `natural`. Vivid +// causes the model to lean towards generating hyper-real and dramatic images. +// Natural causes the model to produce more natural, less hyper-real looking +// images. This param is only supported for `dall-e-3`. +type ImageGenerateParamsStyle string + +const ( + ImageGenerateParamsStyleVivid ImageGenerateParamsStyle = "vivid" + ImageGenerateParamsStyleNatural ImageGenerateParamsStyle = "natural" +) + +func (r ImageGenerateParamsStyle) IsKnown() bool { + switch r { + case ImageGenerateParamsStyleVivid, ImageGenerateParamsStyleNatural: + return true + } + return false +} diff --git a/image_test.go b/image_test.go new file mode 100644 index 0000000..755003d --- /dev/null +++ b/image_test.go @@ -0,0 +1,107 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package openai_test + +import ( + "bytes" + "context" + "errors" + "io" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" +) + +func TestImageNewVariationWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Images.NewVariation(context.TODO(), openai.ImageNewVariationParams{ + Image: openai.F(io.Reader(bytes.NewBuffer([]byte("some file contents")))), + Model: openai.F(openai.ImageNewVariationParamsModelDallE2), + N: openai.F(int64(1)), + ResponseFormat: openai.F(openai.ImageNewVariationParamsResponseFormatURL), + Size: openai.F(openai.ImageNewVariationParamsSize1024x1024), + User: openai.F("user-1234"), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestImageEditWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Images.Edit(context.TODO(), openai.ImageEditParams{ + Image: openai.F(io.Reader(bytes.NewBuffer([]byte("some file contents")))), + Prompt: openai.F("A cute baby sea otter wearing a beret"), + Mask: openai.F(io.Reader(bytes.NewBuffer([]byte("some file contents")))), + Model: openai.F(openai.ImageEditParamsModelDallE2), + N: openai.F(int64(1)), + ResponseFormat: openai.F(openai.ImageEditParamsResponseFormatURL), + Size: openai.F(openai.ImageEditParamsSize1024x1024), + User: 
openai.F("user-1234"), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestImageGenerateWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Images.Generate(context.TODO(), openai.ImageGenerateParams{ + Prompt: openai.F("A cute baby sea otter"), + Model: openai.F(openai.ImageGenerateParamsModelDallE2), + N: openai.F(int64(1)), + Quality: openai.F(openai.ImageGenerateParamsQualityStandard), + ResponseFormat: openai.F(openai.ImageGenerateParamsResponseFormatURL), + Size: openai.F(openai.ImageGenerateParamsSize1024x1024), + Style: openai.F(openai.ImageGenerateParamsStyleVivid), + User: openai.F("user-1234"), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/internal/apierror/apierror.go b/internal/apierror/apierror.go new file mode 100644 index 0000000..81fa67c --- /dev/null +++ b/internal/apierror/apierror.go @@ -0,0 +1,61 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package apierror + +import ( + "fmt" + "net/http" + "net/http/httputil" + + "github.com/openai/openai-go/internal/apijson" +) + +// Error represents an error that originates from the API, i.e. when a request is +// made and the API returns a response with a HTTP status code. Other errors are +// not wrapped by this SDK. 
+type Error struct { + Code string `json:"code,required,nullable"` + Message string `json:"message,required"` + Param string `json:"param,required,nullable"` + Type string `json:"type,required"` + JSON errorJSON `json:"-"` + StatusCode int + Request *http.Request + Response *http.Response +} + +// errorJSON contains the JSON metadata for the struct [Error] +type errorJSON struct { + Code apijson.Field + Message apijson.Field + Param apijson.Field + Type apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *Error) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r errorJSON) RawJSON() string { + return r.raw +} + +func (r *Error) Error() string { + // Attempt to re-populate the response body + return fmt.Sprintf("%s \"%s\": %d %s %s", r.Request.Method, r.Request.URL, r.Response.StatusCode, http.StatusText(r.Response.StatusCode), r.JSON.RawJSON()) +} + +func (r *Error) DumpRequest(body bool) []byte { + if r.Request.GetBody != nil { + r.Request.Body, _ = r.Request.GetBody() + } + out, _ := httputil.DumpRequestOut(r.Request, body) + return out +} + +func (r *Error) DumpResponse(body bool) []byte { + out, _ := httputil.DumpResponse(r.Response, body) + return out +} diff --git a/internal/apiform/encoder.go b/internal/apiform/encoder.go new file mode 100644 index 0000000..58905ea --- /dev/null +++ b/internal/apiform/encoder.go @@ -0,0 +1,381 @@ +package apiform + +import ( + "fmt" + "io" + "mime/multipart" + "net/textproto" + "path" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/openai/openai-go/internal/param" +) + +var encoders sync.Map // map[encoderEntry]encoderFunc + +func Marshal(value interface{}, writer *multipart.Writer) error { + e := &encoder{dateFormat: time.RFC3339} + return e.marshal(value, writer) +} + +func MarshalRoot(value interface{}, writer *multipart.Writer) error { + e := &encoder{root: true, dateFormat: time.RFC3339} + return e.marshal(value, 
writer) +} + +type encoder struct { + dateFormat string + root bool +} + +type encoderFunc func(key string, value reflect.Value, writer *multipart.Writer) error + +type encoderField struct { + tag parsedStructTag + fn encoderFunc + idx []int +} + +type encoderEntry struct { + reflect.Type + dateFormat string + root bool +} + +func (e *encoder) marshal(value interface{}, writer *multipart.Writer) error { + val := reflect.ValueOf(value) + if !val.IsValid() { + return nil + } + typ := val.Type() + enc := e.typeEncoder(typ) + return enc("", val, writer) +} + +func (e *encoder) typeEncoder(t reflect.Type) encoderFunc { + entry := encoderEntry{ + Type: t, + dateFormat: e.dateFormat, + root: e.root, + } + + if fi, ok := encoders.Load(entry); ok { + return fi.(encoderFunc) + } + + // To deal with recursive types, populate the map with an + // indirect func before we build it. This type waits on the + // real func (f) to be ready and then calls it. This indirect + // func is only used for recursive types. + var ( + wg sync.WaitGroup + f encoderFunc + ) + wg.Add(1) + fi, loaded := encoders.LoadOrStore(entry, encoderFunc(func(key string, v reflect.Value, writer *multipart.Writer) error { + wg.Wait() + return f(key, v, writer) + })) + if loaded { + return fi.(encoderFunc) + } + + // Compute the real encoder and replace the indirect func with it. 
+	f = e.newTypeEncoder(t)
+	wg.Done()
+	encoders.Store(entry, f)
+	return f
+}
+
+func (e *encoder) newTypeEncoder(t reflect.Type) encoderFunc {
+	if t.ConvertibleTo(reflect.TypeOf(time.Time{})) {
+		return e.newTimeTypeEncoder()
+	}
+	if t.ConvertibleTo(reflect.TypeOf((*io.Reader)(nil)).Elem()) {
+		return e.newReaderTypeEncoder()
+	}
+	e.root = false
+	switch t.Kind() {
+	case reflect.Pointer:
+		inner := t.Elem()
+
+		innerEncoder := e.typeEncoder(inner)
+		return func(key string, v reflect.Value, writer *multipart.Writer) error {
+			if !v.IsValid() || v.IsNil() {
+				return nil
+			}
+			return innerEncoder(key, v.Elem(), writer)
+		}
+	case reflect.Struct:
+		return e.newStructTypeEncoder(t)
+	case reflect.Slice, reflect.Array:
+		return e.newArrayTypeEncoder(t)
+	case reflect.Map:
+		return e.newMapEncoder(t)
+	case reflect.Interface:
+		return e.newInterfaceEncoder()
+	default:
+		return e.newPrimitiveTypeEncoder(t)
+	}
+}
+
+func (e *encoder) newPrimitiveTypeEncoder(t reflect.Type) encoderFunc {
+	switch t.Kind() {
+	// Note that we could use `gjson` to encode these types but it would complicate our
+	// code more and this current code shouldn't cause any issues
+	case reflect.String:
+		return func(key string, v reflect.Value, writer *multipart.Writer) error {
+			return writer.WriteField(key, v.String())
+		}
+	case reflect.Bool:
+		return func(key string, v reflect.Value, writer *multipart.Writer) error {
+			if v.Bool() {
+				return writer.WriteField(key, "true")
+			}
+			return writer.WriteField(key, "false")
+		}
+	// Cover every signed/unsigned width so e.g. an int8 field does not fall
+	// through to the "unknown type" error below.
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return func(key string, v reflect.Value, writer *multipart.Writer) error {
+			return writer.WriteField(key, strconv.FormatInt(v.Int(), 10))
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return func(key string, v reflect.Value, writer *multipart.Writer) error {
+			return writer.WriteField(key, strconv.FormatUint(v.Uint(), 10))
+		}
+	case reflect.Float32:
+		return func(key string, v
reflect.Value, writer *multipart.Writer) error { + return writer.WriteField(key, strconv.FormatFloat(v.Float(), 'f', -1, 32)) + } + case reflect.Float64: + return func(key string, v reflect.Value, writer *multipart.Writer) error { + return writer.WriteField(key, strconv.FormatFloat(v.Float(), 'f', -1, 64)) + } + default: + return func(key string, v reflect.Value, writer *multipart.Writer) error { + return fmt.Errorf("unknown type received at primitive encoder: %s", t.String()) + } + } +} + +func (e *encoder) newArrayTypeEncoder(t reflect.Type) encoderFunc { + itemEncoder := e.typeEncoder(t.Elem()) + + return func(key string, v reflect.Value, writer *multipart.Writer) error { + if key != "" { + key = key + "." + } + for i := 0; i < v.Len(); i++ { + err := itemEncoder(key+strconv.Itoa(i), v.Index(i), writer) + if err != nil { + return err + } + } + return nil + } +} + +func (e *encoder) newStructTypeEncoder(t reflect.Type) encoderFunc { + if t.Implements(reflect.TypeOf((*param.FieldLike)(nil)).Elem()) { + return e.newFieldTypeEncoder(t) + } + + encoderFields := []encoderField{} + extraEncoder := (*encoderField)(nil) + + // This helper allows us to recursively collect field encoders into a flat + // array. The parameter `index` keeps track of the access patterns necessary + // to get to some field. + var collectEncoderFields func(r reflect.Type, index []int) + collectEncoderFields = func(r reflect.Type, index []int) { + for i := 0; i < r.NumField(); i++ { + idx := append(index, i) + field := t.FieldByIndex(idx) + if !field.IsExported() { + continue + } + // If this is an embedded struct, traverse one level deeper to extract + // the field and get their encoders as well. + if field.Anonymous { + collectEncoderFields(field.Type, idx) + continue + } + // If json tag is not present, then we skip, which is intentionally + // different behavior from the stdlib. 
+ ptag, ok := parseFormStructTag(field) + if !ok { + continue + } + // We only want to support unexported field if they're tagged with + // `extras` because that field shouldn't be part of the public API. We + // also want to only keep the top level extras + if ptag.extras && len(index) == 0 { + extraEncoder = &encoderField{ptag, e.typeEncoder(field.Type.Elem()), idx} + continue + } + if ptag.name == "-" { + continue + } + + dateFormat, ok := parseFormatStructTag(field) + oldFormat := e.dateFormat + if ok { + switch dateFormat { + case "date-time": + e.dateFormat = time.RFC3339 + case "date": + e.dateFormat = "2006-01-02" + } + } + encoderFields = append(encoderFields, encoderField{ptag, e.typeEncoder(field.Type), idx}) + e.dateFormat = oldFormat + } + } + collectEncoderFields(t, []int{}) + + // Ensure deterministic output by sorting by lexicographic order + sort.Slice(encoderFields, func(i, j int) bool { + return encoderFields[i].tag.name < encoderFields[j].tag.name + }) + + return func(key string, value reflect.Value, writer *multipart.Writer) error { + if key != "" { + key = key + "." 
+ } + + for _, ef := range encoderFields { + field := value.FieldByIndex(ef.idx) + err := ef.fn(key+ef.tag.name, field, writer) + if err != nil { + return err + } + } + + if extraEncoder != nil { + err := e.encodeMapEntries(key, value.FieldByIndex(extraEncoder.idx), writer) + if err != nil { + return err + } + } + + return nil + } +} + +func (e *encoder) newFieldTypeEncoder(t reflect.Type) encoderFunc { + f, _ := t.FieldByName("Value") + enc := e.typeEncoder(f.Type) + + return func(key string, value reflect.Value, writer *multipart.Writer) error { + present := value.FieldByName("Present") + if !present.Bool() { + return nil + } + null := value.FieldByName("Null") + if null.Bool() { + return nil + } + raw := value.FieldByName("Raw") + if !raw.IsNil() { + return e.typeEncoder(raw.Type())(key, raw, writer) + } + return enc(key, value.FieldByName("Value"), writer) + } +} + +func (e *encoder) newTimeTypeEncoder() encoderFunc { + format := e.dateFormat + return func(key string, value reflect.Value, writer *multipart.Writer) error { + return writer.WriteField(key, value.Convert(reflect.TypeOf(time.Time{})).Interface().(time.Time).Format(format)) + } +} + +func (e encoder) newInterfaceEncoder() encoderFunc { + return func(key string, value reflect.Value, writer *multipart.Writer) error { + value = value.Elem() + if !value.IsValid() { + return nil + } + return e.typeEncoder(value.Type())(key, value, writer) + } +} + +var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"") + +func escapeQuotes(s string) string { + return quoteEscaper.Replace(s) +} + +func (e *encoder) newReaderTypeEncoder() encoderFunc { + return func(key string, value reflect.Value, writer *multipart.Writer) error { + reader := value.Convert(reflect.TypeOf((*io.Reader)(nil)).Elem()).Interface().(io.Reader) + filename := "anonymous_file" + contentType := "application/octet-stream" + if named, ok := reader.(interface{ Name() string }); ok { + filename = path.Base(named.Name()) + } + if typed, ok := 
reader.(interface{ ContentType() string }); ok { + contentType = path.Base(typed.ContentType()) + } + + // Below is taken almost 1-for-1 from [multipart.CreateFormFile] + h := make(textproto.MIMEHeader) + h.Set("Content-Disposition", fmt.Sprintf(`form-data; name="%s"; filename="%s"`, escapeQuotes(key), escapeQuotes(filename))) + h.Set("Content-Type", contentType) + filewriter, err := writer.CreatePart(h) + if err != nil { + return err + } + _, err = io.Copy(filewriter, reader) + return err + } +} + +// Given a []byte of json (may either be an empty object or an object that already contains entries) +// encode all of the entries in the map to the json byte array. +func (e *encoder) encodeMapEntries(key string, v reflect.Value, writer *multipart.Writer) error { + type mapPair struct { + key string + value reflect.Value + } + + if key != "" { + key = key + "." + } + + pairs := []mapPair{} + + iter := v.MapRange() + for iter.Next() { + if iter.Key().Type().Kind() == reflect.String { + pairs = append(pairs, mapPair{key: iter.Key().String(), value: iter.Value()}) + } else { + return fmt.Errorf("cannot encode a map with a non string key") + } + } + + // Ensure deterministic output + sort.Slice(pairs, func(i, j int) bool { + return pairs[i].key < pairs[j].key + }) + + elementEncoder := e.typeEncoder(v.Type().Elem()) + for _, p := range pairs { + err := elementEncoder(key+string(p.key), p.value, writer) + if err != nil { + return err + } + } + + return nil +} + +func (e *encoder) newMapEncoder(t reflect.Type) encoderFunc { + return func(key string, value reflect.Value, writer *multipart.Writer) error { + return e.encodeMapEntries(key, value, writer) + } +} diff --git a/internal/apiform/form.go b/internal/apiform/form.go new file mode 100644 index 0000000..5445116 --- /dev/null +++ b/internal/apiform/form.go @@ -0,0 +1,5 @@ +package apiform + +type Marshaler interface { + MarshalMultipart() ([]byte, string, error) +} diff --git a/internal/apiform/form_test.go 
b/internal/apiform/form_test.go new file mode 100644 index 0000000..39d1460 --- /dev/null +++ b/internal/apiform/form_test.go @@ -0,0 +1,440 @@ +package apiform + +import ( + "bytes" + "mime/multipart" + "strings" + "testing" + "time" +) + +func P[T any](v T) *T { return &v } + +type Primitives struct { + A bool `form:"a"` + B int `form:"b"` + C uint `form:"c"` + D float64 `form:"d"` + E float32 `form:"e"` + F []int `form:"f"` +} + +type PrimitivePointers struct { + A *bool `form:"a"` + B *int `form:"b"` + C *uint `form:"c"` + D *float64 `form:"d"` + E *float32 `form:"e"` + F *[]int `form:"f"` +} + +type Slices struct { + Slice []Primitives `form:"slices"` +} + +type DateTime struct { + Date time.Time `form:"date" format:"date"` + DateTime time.Time `form:"date-time" format:"date-time"` +} + +type AdditionalProperties struct { + A bool `form:"a"` + Extras map[string]interface{} `form:"-,extras"` +} + +type TypedAdditionalProperties struct { + A bool `form:"a"` + Extras map[string]int `form:"-,extras"` +} + +type EmbeddedStructs struct { + AdditionalProperties + A *int `form:"number2"` + Extras map[string]interface{} `form:"-,extras"` +} + +type Recursive struct { + Name string `form:"name"` + Child *Recursive `form:"child"` +} + +type UnknownStruct struct { + Unknown interface{} `form:"unknown"` +} + +type UnionStruct struct { + Union Union `form:"union" format:"date"` +} + +type Union interface { + union() +} + +type UnionInteger int64 + +func (UnionInteger) union() {} + +type UnionStructA struct { + Type string `form:"type"` + A string `form:"a"` + B string `form:"b"` +} + +func (UnionStructA) union() {} + +type UnionStructB struct { + Type string `form:"type"` + A string `form:"a"` +} + +func (UnionStructB) union() {} + +type UnionTime time.Time + +func (UnionTime) union() {} + +type ReaderStruct struct { +} + +var tests = map[string]struct { + buf string + val interface{} +}{ + "map_string": { + `--xxx +Content-Disposition: form-data; name="foo" + +bar +--xxx-- 
+`, + map[string]string{"foo": "bar"}, + }, + + "map_interface": { + `--xxx +Content-Disposition: form-data; name="a" + +1 +--xxx +Content-Disposition: form-data; name="b" + +str +--xxx +Content-Disposition: form-data; name="c" + +false +--xxx-- +`, + map[string]interface{}{"a": float64(1), "b": "str", "c": false}, + }, + + "primitive_struct": { + `--xxx +Content-Disposition: form-data; name="a" + +false +--xxx +Content-Disposition: form-data; name="b" + +237628372683 +--xxx +Content-Disposition: form-data; name="c" + +654 +--xxx +Content-Disposition: form-data; name="d" + +9999.43 +--xxx +Content-Disposition: form-data; name="e" + +43.76 +--xxx +Content-Disposition: form-data; name="f.0" + +1 +--xxx +Content-Disposition: form-data; name="f.1" + +2 +--xxx +Content-Disposition: form-data; name="f.2" + +3 +--xxx +Content-Disposition: form-data; name="f.3" + +4 +--xxx-- +`, + Primitives{A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + }, + + "slices": { + `--xxx +Content-Disposition: form-data; name="slices.0.a" + +false +--xxx +Content-Disposition: form-data; name="slices.0.b" + +237628372683 +--xxx +Content-Disposition: form-data; name="slices.0.c" + +654 +--xxx +Content-Disposition: form-data; name="slices.0.d" + +9999.43 +--xxx +Content-Disposition: form-data; name="slices.0.e" + +43.76 +--xxx +Content-Disposition: form-data; name="slices.0.f.0" + +1 +--xxx +Content-Disposition: form-data; name="slices.0.f.1" + +2 +--xxx +Content-Disposition: form-data; name="slices.0.f.2" + +3 +--xxx +Content-Disposition: form-data; name="slices.0.f.3" + +4 +--xxx-- +`, + Slices{ + Slice: []Primitives{{A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}}, + }, + }, + + "primitive_pointer_struct": { + `--xxx +Content-Disposition: form-data; name="a" + +false +--xxx +Content-Disposition: form-data; name="b" + +237628372683 +--xxx +Content-Disposition: form-data; name="c" + +654 +--xxx +Content-Disposition: 
form-data; name="d" + +9999.43 +--xxx +Content-Disposition: form-data; name="e" + +43.76 +--xxx +Content-Disposition: form-data; name="f.0" + +1 +--xxx +Content-Disposition: form-data; name="f.1" + +2 +--xxx +Content-Disposition: form-data; name="f.2" + +3 +--xxx +Content-Disposition: form-data; name="f.3" + +4 +--xxx +Content-Disposition: form-data; name="f.4" + +5 +--xxx-- +`, + PrimitivePointers{ + A: P(false), + B: P(237628372683), + C: P(uint(654)), + D: P(9999.43), + E: P(float32(43.76)), + F: &[]int{1, 2, 3, 4, 5}, + }, + }, + + "datetime_struct": { + `--xxx +Content-Disposition: form-data; name="date" + +2006-01-02 +--xxx +Content-Disposition: form-data; name="date-time" + +2006-01-02T15:04:05Z +--xxx-- +`, + DateTime{ + Date: time.Date(2006, time.January, 2, 0, 0, 0, 0, time.UTC), + DateTime: time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC), + }, + }, + + "additional_properties": { + `--xxx +Content-Disposition: form-data; name="a" + +true +--xxx +Content-Disposition: form-data; name="bar" + +value +--xxx +Content-Disposition: form-data; name="foo" + +true +--xxx-- +`, + AdditionalProperties{ + A: true, + Extras: map[string]interface{}{ + "bar": "value", + "foo": true, + }, + }, + }, + + "recursive_struct": { + `--xxx +Content-Disposition: form-data; name="child.name" + +Alex +--xxx +Content-Disposition: form-data; name="name" + +Robert +--xxx-- +`, + Recursive{Name: "Robert", Child: &Recursive{Name: "Alex"}}, + }, + + "unknown_struct_number": { + `--xxx +Content-Disposition: form-data; name="unknown" + +12 +--xxx-- +`, + UnknownStruct{ + Unknown: 12., + }, + }, + + "unknown_struct_map": { + `--xxx +Content-Disposition: form-data; name="unknown.foo" + +bar +--xxx-- +`, + UnknownStruct{ + Unknown: map[string]interface{}{ + "foo": "bar", + }, + }, + }, + + "union_integer": { + `--xxx +Content-Disposition: form-data; name="union" + +12 +--xxx-- +`, + UnionStruct{ + Union: UnionInteger(12), + }, + }, + + "union_struct_discriminated_a": { + `--xxx 
+Content-Disposition: form-data; name="union.a" + +foo +--xxx +Content-Disposition: form-data; name="union.b" + +bar +--xxx +Content-Disposition: form-data; name="union.type" + +typeA +--xxx-- +`, + + UnionStruct{ + Union: UnionStructA{ + Type: "typeA", + A: "foo", + B: "bar", + }, + }, + }, + + "union_struct_discriminated_b": { + `--xxx +Content-Disposition: form-data; name="union.a" + +foo +--xxx +Content-Disposition: form-data; name="union.type" + +typeB +--xxx-- +`, + UnionStruct{ + Union: UnionStructB{ + Type: "typeB", + A: "foo", + }, + }, + }, + + "union_struct_time": { + `--xxx +Content-Disposition: form-data; name="union" + +2010-05-23 +--xxx-- +`, + UnionStruct{ + Union: UnionTime(time.Date(2010, 05, 23, 0, 0, 0, 0, time.UTC)), + }, + }, +} + +func TestEncode(t *testing.T) { + for name, test := range tests { + t.Run(name, func(t *testing.T) { + buf := bytes.NewBuffer(nil) + writer := multipart.NewWriter(buf) + writer.SetBoundary("xxx") + err := Marshal(test.val, writer) + if err != nil { + t.Errorf("serialization of %v failed with error %v", test.val, err) + } + err = writer.Close() + if err != nil { + t.Errorf("serialization of %v failed with error %v", test.val, err) + } + raw := buf.Bytes() + if string(raw) != strings.ReplaceAll(test.buf, "\n", "\r\n") { + t.Errorf("expected %+#v to serialize to '%s' but got '%s'", test.val, test.buf, string(raw)) + } + }) + } +} diff --git a/internal/apiform/tag.go b/internal/apiform/tag.go new file mode 100644 index 0000000..b22e054 --- /dev/null +++ b/internal/apiform/tag.go @@ -0,0 +1,48 @@ +package apiform + +import ( + "reflect" + "strings" +) + +const jsonStructTag = "json" +const formStructTag = "form" +const formatStructTag = "format" + +type parsedStructTag struct { + name string + required bool + extras bool + metadata bool +} + +func parseFormStructTag(field reflect.StructField) (tag parsedStructTag, ok bool) { + raw, ok := field.Tag.Lookup(formStructTag) + if !ok { + raw, ok = 
field.Tag.Lookup(jsonStructTag)
+	}
+	if !ok {
+		return
+	}
+	parts := strings.Split(raw, ",")
+	if len(parts) == 0 {
+		return tag, false
+	}
+	tag.name = parts[0]
+	for _, part := range parts[1:] {
+		switch part {
+		case "required":
+			tag.required = true
+		case "extras":
+			tag.extras = true
+		case "metadata":
+			tag.metadata = true
+		}
+	}
+	return
+}
+
+func parseFormatStructTag(field reflect.StructField) (format string, ok bool) {
+	format, ok = field.Tag.Lookup(formatStructTag)
+	return
+}
diff --git a/internal/apijson/decoder.go b/internal/apijson/decoder.go
new file mode 100644
index 0000000..e1b21b7
--- /dev/null
+++ b/internal/apijson/decoder.go
@@ -0,0 +1,668 @@
+package apijson
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"sync"
+	"time"
+	"unsafe"
+
+	"github.com/tidwall/gjson"
+)
+
+// decoders is a synchronized map with roughly the following type:
+// map[reflect.Type]decoderFunc
+var decoders sync.Map
+
+// Unmarshal is similar to [encoding/json.Unmarshal] and parses the JSON-encoded
+// data and stores it in the given pointer.
+func Unmarshal(raw []byte, to any) error {
+	d := &decoderBuilder{dateFormat: time.RFC3339}
+	return d.unmarshal(raw, to)
+}
+
+// UnmarshalRoot is like Unmarshal, but doesn't try to call UnmarshalJSON on the
+// root element. Useful if a struct's UnmarshalJSON is overridden to use the
+// behavior of this decoder versus the standard library.
+func UnmarshalRoot(raw []byte, to any) error {
+	d := &decoderBuilder{dateFormat: time.RFC3339, root: true}
+	return d.unmarshal(raw, to)
+}
+
+// decoderBuilder contains the 'compile-time' state of the decoder.
+type decoderBuilder struct {
+	// Whether or not this is the first element and called by [UnmarshalRoot], see
+	// the documentation there to see why this is necessary.
+	root bool
+	// The dateFormat (a format string for [time.Format]) which is chosen by the
+	// last struct tag that was seen.
+	dateFormat string
+}
+
+// decoderState contains the 'run-time' state of the decoder.
+type decoderState struct {
+	strict    bool
+	exactness exactness
+}
+
+// Exactness refers to how close to the type the result was if deserialization
+// was successful. This is useful in deserializing unions, where you want to try
+// each entry, first with strict, then with looser validation, without actually
+// having to do a lot of redundant work by marshalling twice (or maybe even more
+// times).
+type exactness int8
+
+const (
+	// Some values had to be fudged a bit, for example by converting a string to an
+	// int, or an enum with extra values.
+	loose exactness = iota
+	// There are some extra arguments, but otherwise it matches the union.
+	extras
+	// Exactly right.
+	exact
+)
+
+type decoderFunc func(node gjson.Result, value reflect.Value, state *decoderState) error
+
+type decoderField struct {
+	tag    parsedStructTag
+	fn     decoderFunc
+	idx    []int
+	goname string
+}
+
+type decoderEntry struct {
+	reflect.Type
+	dateFormat string
+	root       bool
+}
+
+func (d *decoderBuilder) unmarshal(raw []byte, to any) error {
+	value := reflect.ValueOf(to).Elem()
+	result := gjson.ParseBytes(raw)
+	if !value.IsValid() {
+		return fmt.Errorf("apijson: cannot marshal into invalid value")
+	}
+	return d.typeDecoder(value.Type())(result, value, &decoderState{strict: false, exactness: exact})
+}
+
+func (d *decoderBuilder) typeDecoder(t reflect.Type) decoderFunc {
+	entry := decoderEntry{
+		Type:       t,
+		dateFormat: d.dateFormat,
+		root:       d.root,
+	}
+
+	if fi, ok := decoders.Load(entry); ok {
+		return fi.(decoderFunc)
+	}
+
+	// To deal with recursive types, populate the map with an
+	// indirect func before we build it. This type waits on the
+	// real func (f) to be ready and then calls it. This indirect
+	// func is only used for recursive types.
+ var ( + wg sync.WaitGroup + f decoderFunc + ) + wg.Add(1) + fi, loaded := decoders.LoadOrStore(entry, decoderFunc(func(node gjson.Result, v reflect.Value, state *decoderState) error { + wg.Wait() + return f(node, v, state) + })) + if loaded { + return fi.(decoderFunc) + } + + // Compute the real decoder and replace the indirect func with it. + f = d.newTypeDecoder(t) + wg.Done() + decoders.Store(entry, f) + return f +} + +func indirectUnmarshalerDecoder(n gjson.Result, v reflect.Value, state *decoderState) error { + return v.Addr().Interface().(json.Unmarshaler).UnmarshalJSON([]byte(n.Raw)) +} + +func unmarshalerDecoder(n gjson.Result, v reflect.Value, state *decoderState) error { + if v.Kind() == reflect.Pointer && v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } + return v.Interface().(json.Unmarshaler).UnmarshalJSON([]byte(n.Raw)) +} + +func (d *decoderBuilder) newTypeDecoder(t reflect.Type) decoderFunc { + if t.ConvertibleTo(reflect.TypeOf(time.Time{})) { + return d.newTimeTypeDecoder(t) + } + if !d.root && t.Implements(reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()) { + return unmarshalerDecoder + } + if !d.root && reflect.PointerTo(t).Implements(reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()) { + return indirectUnmarshalerDecoder + } + d.root = false + + if _, ok := unionRegistry[t]; ok { + return d.newUnionDecoder(t) + } + + switch t.Kind() { + case reflect.Pointer: + inner := t.Elem() + innerDecoder := d.typeDecoder(inner) + + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + if !v.IsValid() { + return fmt.Errorf("apijson: unexpected invalid reflection value %+#v", v) + } + + newValue := reflect.New(inner).Elem() + err := innerDecoder(n, newValue, state) + if err != nil { + return err + } + + v.Set(newValue.Addr()) + return nil + } + case reflect.Struct: + return d.newStructTypeDecoder(t) + case reflect.Array: + fallthrough + case reflect.Slice: + return d.newArrayTypeDecoder(t) + case reflect.Map: + return 
d.newMapDecoder(t) + case reflect.Interface: + return func(node gjson.Result, value reflect.Value, state *decoderState) error { + if !value.IsValid() { + return fmt.Errorf("apijson: unexpected invalid value %+#v", value) + } + if node.Value() != nil && value.CanSet() { + value.Set(reflect.ValueOf(node.Value())) + } + return nil + } + default: + return d.newPrimitiveTypeDecoder(t) + } +} + +// newUnionDecoder returns a decoderFunc that deserializes into a union using an +// algorithm roughly similar to Pydantic's [smart algorithm]. +// +// Conceptually this is equivalent to choosing the best schema based on how 'exact' +// the deserialization is for each of the schemas. +// +// If there is a tie in the level of exactness, then the tie is broken +// left-to-right. +// +// [smart algorithm]: https://docs.pydantic.dev/latest/concepts/unions/#smart-mode +func (d *decoderBuilder) newUnionDecoder(t reflect.Type) decoderFunc { + unionEntry, ok := unionRegistry[t] + if !ok { + panic("apijson: couldn't find union of type " + t.String() + " in union registry") + } + decoders := []decoderFunc{} + for _, variant := range unionEntry.variants { + decoder := d.typeDecoder(variant.Type) + decoders = append(decoders, decoder) + } + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + // If there is a discriminator match, circumvent the exactness logic entirely + for idx, variant := range unionEntry.variants { + decoder := decoders[idx] + if variant.TypeFilter != n.Type { + continue + } + + if len(unionEntry.discriminatorKey) != 0 { + discriminatorValue := n.Get(unionEntry.discriminatorKey).Value() + if discriminatorValue == variant.DiscriminatorValue { + inner := reflect.New(variant.Type).Elem() + err := decoder(n, inner, state) + v.Set(inner) + return err + } + } + } + + // Set bestExactness to worse than loose + bestExactness := loose - 1 + for idx, variant := range unionEntry.variants { + decoder := decoders[idx] + if variant.TypeFilter != n.Type { + 
continue + } + sub := decoderState{strict: state.strict, exactness: exact} + inner := reflect.New(variant.Type).Elem() + err := decoder(n, inner, &sub) + if err != nil { + continue + } + if sub.exactness == exact { + v.Set(inner) + return nil + } + if sub.exactness > bestExactness { + v.Set(inner) + bestExactness = sub.exactness + } + } + + if bestExactness < loose { + return errors.New("apijson: was not able to coerce type as union") + } + + if guardStrict(state, bestExactness != exact) { + return errors.New("apijson: was not able to coerce type as union strictly") + } + + return nil + } +} + +func (d *decoderBuilder) newMapDecoder(t reflect.Type) decoderFunc { + keyType := t.Key() + itemType := t.Elem() + itemDecoder := d.typeDecoder(itemType) + + return func(node gjson.Result, value reflect.Value, state *decoderState) (err error) { + mapValue := reflect.MakeMapWithSize(t, len(node.Map())) + + node.ForEach(func(key, value gjson.Result) bool { + // It's fine for us to just use `ValueOf` here because the key types will + // always be primitive types so we don't need to decode it using the standard pattern + keyValue := reflect.ValueOf(key.Value()) + if !keyValue.IsValid() { + if err == nil { + err = fmt.Errorf("apijson: received invalid key type %v", keyValue.String()) + } + return false + } + if keyValue.Type() != keyType { + if err == nil { + err = fmt.Errorf("apijson: expected key type %v but got %v", keyType, keyValue.Type()) + } + return false + } + + itemValue := reflect.New(itemType).Elem() + itemerr := itemDecoder(value, itemValue, state) + if itemerr != nil { + if err == nil { + err = itemerr + } + return false + } + + mapValue.SetMapIndex(keyValue, itemValue) + return true + }) + + if err != nil { + return err + } + value.Set(mapValue) + return nil + } +} + +func (d *decoderBuilder) newArrayTypeDecoder(t reflect.Type) decoderFunc { + itemDecoder := d.typeDecoder(t.Elem()) + + return func(node gjson.Result, value reflect.Value, state *decoderState) (err 
error) { + if !node.IsArray() { + return fmt.Errorf("apijson: could not deserialize to an array") + } + + arrayNode := node.Array() + + arrayValue := reflect.MakeSlice(reflect.SliceOf(t.Elem()), len(arrayNode), len(arrayNode)) + for i, itemNode := range arrayNode { + err = itemDecoder(itemNode, arrayValue.Index(i), state) + if err != nil { + return err + } + } + + value.Set(arrayValue) + return nil + } +} + +func (d *decoderBuilder) newStructTypeDecoder(t reflect.Type) decoderFunc { + // map of json field name to struct field decoders + decoderFields := map[string]decoderField{} + anonymousDecoders := []decoderField{} + extraDecoder := (*decoderField)(nil) + inlineDecoder := (*decoderField)(nil) + + for i := 0; i < t.NumField(); i++ { + idx := []int{i} + field := t.FieldByIndex(idx) + if !field.IsExported() { + continue + } + // If this is an embedded struct, traverse one level deeper to extract + // the fields and get their encoders as well. + if field.Anonymous { + anonymousDecoders = append(anonymousDecoders, decoderField{ + fn: d.typeDecoder(field.Type), + idx: idx[:], + }) + continue + } + // If json tag is not present, then we skip, which is intentionally + // different behavior from the stdlib. + ptag, ok := parseJSONStructTag(field) + if !ok { + continue + } + // We only want to support unexported fields if they're tagged with + // `extras` because that field shouldn't be part of the public API. 
+ if ptag.extras { + extraDecoder = &decoderField{ptag, d.typeDecoder(field.Type.Elem()), idx, field.Name} + continue + } + if ptag.inline { + inlineDecoder = &decoderField{ptag, d.typeDecoder(field.Type), idx, field.Name} + continue + } + if ptag.metadata { + continue + } + + oldFormat := d.dateFormat + dateFormat, ok := parseFormatStructTag(field) + if ok { + switch dateFormat { + case "date-time": + d.dateFormat = time.RFC3339 + case "date": + d.dateFormat = "2006-01-02" + } + } + decoderFields[ptag.name] = decoderField{ptag, d.typeDecoder(field.Type), idx, field.Name} + d.dateFormat = oldFormat + } + + return func(node gjson.Result, value reflect.Value, state *decoderState) (err error) { + if field := value.FieldByName("JSON"); field.IsValid() { + if raw := field.FieldByName("raw"); raw.IsValid() { + setUnexportedField(raw, node.Raw) + } + } + + for _, decoder := range anonymousDecoders { + // ignore errors + decoder.fn(node, value.FieldByIndex(decoder.idx), state) + } + + if inlineDecoder != nil { + var meta Field + dest := value.FieldByIndex(inlineDecoder.idx) + isValid := false + if dest.IsValid() && node.Type != gjson.Null { + err = inlineDecoder.fn(node, dest, state) + if err == nil { + isValid = true + } + } + + if node.Type == gjson.Null { + meta = Field{ + raw: node.Raw, + status: null, + } + } else if !isValid { + meta = Field{ + raw: node.Raw, + status: invalid, + } + } else if isValid { + meta = Field{ + raw: node.Raw, + status: valid, + } + } + if metadata := getSubField(value, inlineDecoder.idx, inlineDecoder.goname); metadata.IsValid() { + metadata.Set(reflect.ValueOf(meta)) + } + return err + } + + typedExtraType := reflect.Type(nil) + typedExtraFields := reflect.Value{} + if extraDecoder != nil { + typedExtraType = value.FieldByIndex(extraDecoder.idx).Type() + typedExtraFields = reflect.MakeMap(typedExtraType) + } + untypedExtraFields := map[string]Field{} + + for fieldName, itemNode := range node.Map() { + df, explicit := 
decoderFields[fieldName] + var ( + dest reflect.Value + fn decoderFunc + meta Field + ) + if explicit { + fn = df.fn + dest = value.FieldByIndex(df.idx) + } + if !explicit && extraDecoder != nil { + dest = reflect.New(typedExtraType.Elem()).Elem() + fn = extraDecoder.fn + } + + isValid := false + if dest.IsValid() && itemNode.Type != gjson.Null { + err = fn(itemNode, dest, state) + if err == nil { + isValid = true + } + } + + if itemNode.Type == gjson.Null { + meta = Field{ + raw: itemNode.Raw, + status: null, + } + } else if !isValid { + meta = Field{ + raw: itemNode.Raw, + status: invalid, + } + } else if isValid { + meta = Field{ + raw: itemNode.Raw, + status: valid, + } + } + + if explicit { + if metadata := getSubField(value, df.idx, df.goname); metadata.IsValid() { + metadata.Set(reflect.ValueOf(meta)) + } + } + if !explicit { + untypedExtraFields[fieldName] = meta + } + if !explicit && extraDecoder != nil { + typedExtraFields.SetMapIndex(reflect.ValueOf(fieldName), dest) + } + } + + if extraDecoder != nil && typedExtraFields.Len() > 0 { + value.FieldByIndex(extraDecoder.idx).Set(typedExtraFields) + } + + // Set exactness to 'extras' if there are untyped, extra fields. + if len(untypedExtraFields) > 0 && state.exactness > extras { + state.exactness = extras + } + + if metadata := getSubField(value, []int{-1}, "ExtraFields"); metadata.IsValid() && len(untypedExtraFields) > 0 { + metadata.Set(reflect.ValueOf(untypedExtraFields)) + } + return nil + } +} + +func (d *decoderBuilder) newPrimitiveTypeDecoder(t reflect.Type) decoderFunc { + switch t.Kind() { + case reflect.String: + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + v.SetString(n.String()) + if guardStrict(state, n.Type != gjson.String) { + return fmt.Errorf("apijson: failed to parse string strictly") + } + // Everything that is not an object can be loosely stringified. 
+ if n.Type == gjson.JSON { + return fmt.Errorf("apijson: failed to parse string") + } + if guardUnknown(state, v) { + return fmt.Errorf("apijson: failed string enum validation") + } + return nil + } + case reflect.Bool: + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + v.SetBool(n.Bool()) + if guardStrict(state, n.Type != gjson.True && n.Type != gjson.False) { + return fmt.Errorf("apijson: failed to parse bool strictly") + } + // Numbers and strings that are either 'true' or 'false' can be loosely + // deserialized as bool. + if n.Type == gjson.String && (n.Raw != "true" && n.Raw != "false") || n.Type == gjson.JSON { + return fmt.Errorf("apijson: failed to parse bool") + } + if guardUnknown(state, v) { + return fmt.Errorf("apijson: failed bool enum validation") + } + return nil + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + v.SetInt(n.Int()) + if guardStrict(state, n.Type != gjson.Number || n.Num != float64(int(n.Num))) { + return fmt.Errorf("apijson: failed to parse int strictly") + } + // Numbers, booleans, and strings that maybe look like numbers can be + // loosely deserialized as numbers. + if n.Type == gjson.JSON || (n.Type == gjson.String && !canParseAsNumber(n.Str)) { + return fmt.Errorf("apijson: failed to parse int") + } + if guardUnknown(state, v) { + return fmt.Errorf("apijson: failed int enum validation") + } + return nil + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + v.SetUint(n.Uint()) + if guardStrict(state, n.Type != gjson.Number || n.Num != float64(int(n.Num)) || n.Num < 0) { + return fmt.Errorf("apijson: failed to parse uint strictly") + } + // Numbers, booleans, and strings that maybe look like numbers can be + // loosely deserialized as uint. 
+ if n.Type == gjson.JSON || (n.Type == gjson.String && !canParseAsNumber(n.Str)) { + return fmt.Errorf("apijson: failed to parse uint") + } + if guardUnknown(state, v) { + return fmt.Errorf("apijson: failed uint enum validation") + } + return nil + } + case reflect.Float32, reflect.Float64: + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + v.SetFloat(n.Float()) + if guardStrict(state, n.Type != gjson.Number) { + return fmt.Errorf("apijson: failed to parse float strictly") + } + // Numbers, booleans, and strings that maybe look like numbers can be + // loosely deserialized as floats. + if n.Type == gjson.JSON || (n.Type == gjson.String && !canParseAsNumber(n.Str)) { + return fmt.Errorf("apijson: failed to parse float") + } + if guardUnknown(state, v) { + return fmt.Errorf("apijson: failed float enum validation") + } + return nil + } + default: + return func(node gjson.Result, v reflect.Value, state *decoderState) error { + return fmt.Errorf("unknown type received at primitive decoder: %s", t.String()) + } + } +} + +func (d *decoderBuilder) newTimeTypeDecoder(t reflect.Type) decoderFunc { + format := d.dateFormat + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + parsed, err := time.Parse(format, n.Str) + if err == nil { + v.Set(reflect.ValueOf(parsed).Convert(t)) + return nil + } + + if guardStrict(state, true) { + return err + } + + layouts := []string{ + "2006-01-02", + "2006-01-02T15:04:05Z07:00", + "2006-01-02T15:04:05Z0700", + "2006-01-02T15:04:05", + "2006-01-02 15:04:05Z07:00", + "2006-01-02 15:04:05Z0700", + "2006-01-02 15:04:05", + } + + for _, layout := range layouts { + parsed, err := time.Parse(layout, n.Str) + if err == nil { + v.Set(reflect.ValueOf(parsed).Convert(t)) + return nil + } + } + + return fmt.Errorf("unable to leniently parse date-time string: %s", n.Str) + } +} + +func setUnexportedField(field reflect.Value, value interface{}) { + reflect.NewAt(field.Type(), 
unsafe.Pointer(field.UnsafeAddr())).Elem().Set(reflect.ValueOf(value)) +} + +func guardStrict(state *decoderState, cond bool) bool { + if !cond { + return false + } + + if state.strict { + return true + } + + state.exactness = loose + return false +} + +func canParseAsNumber(str string) bool { + _, err := strconv.ParseFloat(str, 64) + return err == nil +} + +func guardUnknown(state *decoderState, v reflect.Value) bool { + if have, ok := v.Interface().(interface{ IsKnown() bool }); guardStrict(state, ok && !have.IsKnown()) { + return true + } + return false +} diff --git a/internal/apijson/encoder.go b/internal/apijson/encoder.go new file mode 100644 index 0000000..13d8903 --- /dev/null +++ b/internal/apijson/encoder.go @@ -0,0 +1,391 @@ +package apijson + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "sort" + "strconv" + "sync" + "time" + + "github.com/tidwall/sjson" + + "github.com/openai/openai-go/internal/param" +) + +var encoders sync.Map // map[encoderEntry]encoderFunc + +func Marshal(value interface{}) ([]byte, error) { + e := &encoder{dateFormat: time.RFC3339} + return e.marshal(value) +} + +func MarshalRoot(value interface{}) ([]byte, error) { + e := &encoder{root: true, dateFormat: time.RFC3339} + return e.marshal(value) +} + +type encoder struct { + dateFormat string + root bool +} + +type encoderFunc func(value reflect.Value) ([]byte, error) + +type encoderField struct { + tag parsedStructTag + fn encoderFunc + idx []int +} + +type encoderEntry struct { + reflect.Type + dateFormat string + root bool +} + +func (e *encoder) marshal(value interface{}) ([]byte, error) { + val := reflect.ValueOf(value) + if !val.IsValid() { + return nil, nil + } + typ := val.Type() + enc := e.typeEncoder(typ) + return enc(val) +} + +func (e *encoder) typeEncoder(t reflect.Type) encoderFunc { + entry := encoderEntry{ + Type: t, + dateFormat: e.dateFormat, + root: e.root, + } + + if fi, ok := encoders.Load(entry); ok { + return fi.(encoderFunc) + } + + // To deal 
with recursive types, populate the map with an + // indirect func before we build it. This type waits on the + // real func (f) to be ready and then calls it. This indirect + // func is only used for recursive types. + var ( + wg sync.WaitGroup + f encoderFunc + ) + wg.Add(1) + fi, loaded := encoders.LoadOrStore(entry, encoderFunc(func(v reflect.Value) ([]byte, error) { + wg.Wait() + return f(v) + })) + if loaded { + return fi.(encoderFunc) + } + + // Compute the real encoder and replace the indirect func with it. + f = e.newTypeEncoder(t) + wg.Done() + encoders.Store(entry, f) + return f +} + +func marshalerEncoder(v reflect.Value) ([]byte, error) { + return v.Interface().(json.Marshaler).MarshalJSON() +} + +func indirectMarshalerEncoder(v reflect.Value) ([]byte, error) { + return v.Addr().Interface().(json.Marshaler).MarshalJSON() +} + +func (e *encoder) newTypeEncoder(t reflect.Type) encoderFunc { + if t.ConvertibleTo(reflect.TypeOf(time.Time{})) { + return e.newTimeTypeEncoder() + } + if !e.root && t.Implements(reflect.TypeOf((*json.Marshaler)(nil)).Elem()) { + return marshalerEncoder + } + if !e.root && reflect.PointerTo(t).Implements(reflect.TypeOf((*json.Marshaler)(nil)).Elem()) { + return indirectMarshalerEncoder + } + e.root = false + switch t.Kind() { + case reflect.Pointer: + inner := t.Elem() + + innerEncoder := e.typeEncoder(inner) + return func(v reflect.Value) ([]byte, error) { + if !v.IsValid() || v.IsNil() { + return nil, nil + } + return innerEncoder(v.Elem()) + } + case reflect.Struct: + return e.newStructTypeEncoder(t) + case reflect.Array: + fallthrough + case reflect.Slice: + return e.newArrayTypeEncoder(t) + case reflect.Map: + return e.newMapEncoder(t) + case reflect.Interface: + return e.newInterfaceEncoder() + default: + return e.newPrimitiveTypeEncoder(t) + } +} + +func (e *encoder) newPrimitiveTypeEncoder(t reflect.Type) encoderFunc { + switch t.Kind() { + // Note that we could use `gjson` to encode these types but it would complicate 
our + // code more and this current code shouldn't cause any issues + case reflect.String: + return func(v reflect.Value) ([]byte, error) { + return []byte(fmt.Sprintf("%q", v.String())), nil + } + case reflect.Bool: + return func(v reflect.Value) ([]byte, error) { + if v.Bool() { + return []byte("true"), nil + } + return []byte("false"), nil + } + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + return func(v reflect.Value) ([]byte, error) { + return []byte(strconv.FormatInt(v.Int(), 10)), nil + } + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return func(v reflect.Value) ([]byte, error) { + return []byte(strconv.FormatUint(v.Uint(), 10)), nil + } + case reflect.Float32: + return func(v reflect.Value) ([]byte, error) { + return []byte(strconv.FormatFloat(v.Float(), 'f', -1, 32)), nil + } + case reflect.Float64: + return func(v reflect.Value) ([]byte, error) { + return []byte(strconv.FormatFloat(v.Float(), 'f', -1, 64)), nil + } + default: + return func(v reflect.Value) ([]byte, error) { + return nil, fmt.Errorf("unknown type received at primitive encoder: %s", t.String()) + } + } +} + +func (e *encoder) newArrayTypeEncoder(t reflect.Type) encoderFunc { + itemEncoder := e.typeEncoder(t.Elem()) + + return func(value reflect.Value) ([]byte, error) { + json := []byte("[]") + for i := 0; i < value.Len(); i++ { + var value, err = itemEncoder(value.Index(i)) + if err != nil { + return nil, err + } + if value == nil { + // Assume that empty items should be inserted as `null` so that the output array + // will be the same length as the input array + value = []byte("null") + } + + json, err = sjson.SetRawBytes(json, "-1", value) + if err != nil { + return nil, err + } + } + + return json, nil + } +} + +func (e *encoder) newStructTypeEncoder(t reflect.Type) encoderFunc { + if t.Implements(reflect.TypeOf((*param.FieldLike)(nil)).Elem()) { + return e.newFieldTypeEncoder(t) + } + + encoderFields := []encoderField{} + extraEncoder := 
(*encoderField)(nil) + + // This helper allows us to recursively collect field encoders into a flat + // array. The parameter `index` keeps track of the access patterns necessary + // to get to some field. + var collectEncoderFields func(r reflect.Type, index []int) + collectEncoderFields = func(r reflect.Type, index []int) { + for i := 0; i < r.NumField(); i++ { + idx := append(index, i) + field := t.FieldByIndex(idx) + if !field.IsExported() { + continue + } + // If this is an embedded struct, traverse one level deeper to extract + // the field and get their encoders as well. + if field.Anonymous { + collectEncoderFields(field.Type, idx) + continue + } + // If json tag is not present, then we skip, which is intentionally + // different behavior from the stdlib. + ptag, ok := parseJSONStructTag(field) + if !ok { + continue + } + // We only want to support unexported field if they're tagged with + // `extras` because that field shouldn't be part of the public API. We + // also want to only keep the top level extras + if ptag.extras && len(index) == 0 { + extraEncoder = &encoderField{ptag, e.typeEncoder(field.Type.Elem()), idx} + continue + } + if ptag.name == "-" { + continue + } + + dateFormat, ok := parseFormatStructTag(field) + oldFormat := e.dateFormat + if ok { + switch dateFormat { + case "date-time": + e.dateFormat = time.RFC3339 + case "date": + e.dateFormat = "2006-01-02" + } + } + encoderFields = append(encoderFields, encoderField{ptag, e.typeEncoder(field.Type), idx}) + e.dateFormat = oldFormat + } + } + collectEncoderFields(t, []int{}) + + // Ensure deterministic output by sorting by lexicographic order + sort.Slice(encoderFields, func(i, j int) bool { + return encoderFields[i].tag.name < encoderFields[j].tag.name + }) + + return func(value reflect.Value) (json []byte, err error) { + json = []byte("{}") + + for _, ef := range encoderFields { + field := value.FieldByIndex(ef.idx) + encoded, err := ef.fn(field) + if err != nil { + return nil, err + } + if 
encoded == nil { + continue + } + json, err = sjson.SetRawBytes(json, ef.tag.name, encoded) + if err != nil { + return nil, err + } + } + + if extraEncoder != nil { + json, err = e.encodeMapEntries(json, value.FieldByIndex(extraEncoder.idx)) + if err != nil { + return nil, err + } + } + return + } +} + +func (e *encoder) newFieldTypeEncoder(t reflect.Type) encoderFunc { + f, _ := t.FieldByName("Value") + enc := e.typeEncoder(f.Type) + + return func(value reflect.Value) (json []byte, err error) { + present := value.FieldByName("Present") + if !present.Bool() { + return nil, nil + } + null := value.FieldByName("Null") + if null.Bool() { + return []byte("null"), nil + } + raw := value.FieldByName("Raw") + if !raw.IsNil() { + return e.typeEncoder(raw.Type())(raw) + } + return enc(value.FieldByName("Value")) + } +} + +func (e *encoder) newTimeTypeEncoder() encoderFunc { + format := e.dateFormat + return func(value reflect.Value) (json []byte, err error) { + return []byte(`"` + value.Convert(reflect.TypeOf(time.Time{})).Interface().(time.Time).Format(format) + `"`), nil + } +} + +func (e encoder) newInterfaceEncoder() encoderFunc { + return func(value reflect.Value) ([]byte, error) { + value = value.Elem() + if !value.IsValid() { + return nil, nil + } + return e.typeEncoder(value.Type())(value) + } +} + +// Given a []byte of json (may either be an empty object or an object that already contains entries) +// encode all of the entries in the map to the json byte array. 
+func (e *encoder) encodeMapEntries(json []byte, v reflect.Value) ([]byte, error) { + type mapPair struct { + key []byte + value reflect.Value + } + + pairs := []mapPair{} + keyEncoder := e.typeEncoder(v.Type().Key()) + + iter := v.MapRange() + for iter.Next() { + var encodedKey []byte + if iter.Key().Type().Kind() == reflect.String { + encodedKey = []byte(iter.Key().String()) + } else { + var err error + encodedKey, err = keyEncoder(iter.Key()) + if err != nil { + return nil, err + } + } + pairs = append(pairs, mapPair{key: encodedKey, value: iter.Value()}) + } + + // Ensure deterministic output + sort.Slice(pairs, func(i, j int) bool { + return bytes.Compare(pairs[i].key, pairs[j].key) < 0 + }) + + elementEncoder := e.typeEncoder(v.Type().Elem()) + for _, p := range pairs { + encodedValue, err := elementEncoder(p.value) + if err != nil { + return nil, err + } + if len(encodedValue) == 0 { + continue + } + json, err = sjson.SetRawBytes(json, string(p.key), encodedValue) + if err != nil { + return nil, err + } + } + + return json, nil +} + +func (e *encoder) newMapEncoder(t reflect.Type) encoderFunc { + return func(value reflect.Value) ([]byte, error) { + json := []byte("{}") + var err error + json, err = e.encodeMapEntries(json, value) + if err != nil { + return nil, err + } + return json, nil + } +} diff --git a/internal/apijson/field.go b/internal/apijson/field.go new file mode 100644 index 0000000..3ef207c --- /dev/null +++ b/internal/apijson/field.go @@ -0,0 +1,41 @@ +package apijson + +import "reflect" + +type status uint8 + +const ( + missing status = iota + null + invalid + valid +) + +type Field struct { + raw string + status status +} + +// Returns true if the field is explicitly `null` _or_ if it is not present at all (ie, missing). +// To check if the field's key is present in the JSON with an explicit null value, +// you must check `f.IsNull() && !f.IsMissing()`. 
+func (j Field) IsNull() bool { return j.status <= null } +func (j Field) IsMissing() bool { return j.status == missing } +func (j Field) IsInvalid() bool { return j.status == invalid } +func (j Field) Raw() string { return j.raw } + +func getSubField(root reflect.Value, index []int, name string) reflect.Value { + strct := root.FieldByIndex(index[:len(index)-1]) + if !strct.IsValid() { + panic("couldn't find encapsulating struct for field " + name) + } + meta := strct.FieldByName("JSON") + if !meta.IsValid() { + return reflect.Value{} + } + field := meta.FieldByName(name) + if !field.IsValid() { + return reflect.Value{} + } + return field +} diff --git a/internal/apijson/field_test.go b/internal/apijson/field_test.go new file mode 100644 index 0000000..053a1ed --- /dev/null +++ b/internal/apijson/field_test.go @@ -0,0 +1,66 @@ +package apijson + +import ( + "testing" + "time" + + "github.com/openai/openai-go/internal/param" +) + +type Struct struct { + A string `json:"a"` + B int64 `json:"b"` +} + +type FieldStruct struct { + A param.Field[string] `json:"a"` + B param.Field[int64] `json:"b"` + C param.Field[Struct] `json:"c"` + D param.Field[time.Time] `json:"d" format:"date"` + E param.Field[time.Time] `json:"e" format:"date-time"` + F param.Field[int64] `json:"f"` +} + +func TestFieldMarshal(t *testing.T) { + tests := map[string]struct { + value interface{} + expected string + }{ + "null_string": {param.Field[string]{Present: true, Null: true}, "null"}, + "null_int": {param.Field[int]{Present: true, Null: true}, "null"}, + "null_int64": {param.Field[int64]{Present: true, Null: true}, "null"}, + "null_struct": {param.Field[Struct]{Present: true, Null: true}, "null"}, + + "string": {param.Field[string]{Present: true, Value: "string"}, `"string"`}, + "int": {param.Field[int]{Present: true, Value: 123}, "123"}, + "int64": {param.Field[int64]{Present: true, Value: int64(123456789123456789)}, "123456789123456789"}, + "struct": {param.Field[Struct]{Present: true, Value: 
Struct{A: "yo", B: 123}}, `{"a":"yo","b":123}`}, + + "string_raw": {param.Field[int]{Present: true, Raw: "string"}, `"string"`}, + "int_raw": {param.Field[int]{Present: true, Raw: 123}, "123"}, + "int64_raw": {param.Field[int]{Present: true, Raw: int64(123456789123456789)}, "123456789123456789"}, + "struct_raw": {param.Field[int]{Present: true, Raw: Struct{A: "yo", B: 123}}, `{"a":"yo","b":123}`}, + + "param_struct": { + FieldStruct{ + A: param.Field[string]{Present: true, Value: "hello"}, + B: param.Field[int64]{Present: true, Value: int64(12)}, + D: param.Field[time.Time]{Present: true, Value: time.Date(2023, time.March, 18, 14, 47, 38, 0, time.UTC)}, + E: param.Field[time.Time]{Present: true, Value: time.Date(2023, time.March, 18, 14, 47, 38, 0, time.UTC)}, + }, + `{"a":"hello","b":12,"d":"2023-03-18","e":"2023-03-18T14:47:38Z"}`, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + b, err := Marshal(test.value) + if err != nil { + t.Fatalf("didn't expect error %v", err) + } + if string(b) != test.expected { + t.Fatalf("expected %s, received %s", test.expected, string(b)) + } + }) + } +} diff --git a/internal/apijson/json_test.go b/internal/apijson/json_test.go new file mode 100644 index 0000000..72bc4c2 --- /dev/null +++ b/internal/apijson/json_test.go @@ -0,0 +1,554 @@ +package apijson + +import ( + "reflect" + "strings" + "testing" + "time" + + "github.com/tidwall/gjson" +) + +func P[T any](v T) *T { return &v } + +type Primitives struct { + A bool `json:"a"` + B int `json:"b"` + C uint `json:"c"` + D float64 `json:"d"` + E float32 `json:"e"` + F []int `json:"f"` +} + +type PrimitivePointers struct { + A *bool `json:"a"` + B *int `json:"b"` + C *uint `json:"c"` + D *float64 `json:"d"` + E *float32 `json:"e"` + F *[]int `json:"f"` +} + +type Slices struct { + Slice []Primitives `json:"slices"` +} + +type DateTime struct { + Date time.Time `json:"date" format:"date"` + DateTime time.Time `json:"date-time" format:"date-time"` +} + 
+type AdditionalProperties struct { + A bool `json:"a"` + ExtraFields map[string]interface{} `json:"-,extras"` +} + +type TypedAdditionalProperties struct { + A bool `json:"a"` + ExtraFields map[string]int `json:"-,extras"` +} + +type EmbeddedStruct struct { + A bool `json:"a"` + B string `json:"b"` + + JSON EmbeddedStructJSON +} + +type EmbeddedStructJSON struct { + A Field + B Field + ExtraFields map[string]Field + raw string +} + +type EmbeddedStructs struct { + EmbeddedStruct + A *int `json:"a"` + ExtraFields map[string]interface{} `json:"-,extras"` + + JSON EmbeddedStructsJSON +} + +type EmbeddedStructsJSON struct { + A Field + ExtraFields map[string]Field + raw string +} + +type Recursive struct { + Name string `json:"name"` + Child *Recursive `json:"child"` +} + +type JSONFieldStruct struct { + A bool `json:"a"` + B int64 `json:"b"` + C string `json:"c"` + D string `json:"d"` + ExtraFields map[string]int64 `json:"-,extras"` + JSON JSONFieldStructJSON `json:"-,metadata"` +} + +type JSONFieldStructJSON struct { + A Field + B Field + C Field + D Field + ExtraFields map[string]Field + raw string +} + +type UnknownStruct struct { + Unknown interface{} `json:"unknown"` +} + +type UnionStruct struct { + Union Union `json:"union" format:"date"` +} + +type Union interface { + union() +} + +type Inline struct { + InlineField Primitives `json:"-,inline"` + JSON InlineJSON `json:"-,metadata"` +} + +type InlineArray struct { + InlineField []string `json:"-,inline"` + JSON InlineJSON `json:"-,metadata"` +} + +type InlineJSON struct { + InlineField Field + raw string +} + +type UnionInteger int64 + +func (UnionInteger) union() {} + +type UnionStructA struct { + Type string `json:"type"` + A string `json:"a"` + B string `json:"b"` +} + +func (UnionStructA) union() {} + +type UnionStructB struct { + Type string `json:"type"` + A string `json:"a"` +} + +func (UnionStructB) union() {} + +type UnionTime time.Time + +func (UnionTime) union() {} + +func init() { + 
RegisterUnion(reflect.TypeOf((*Union)(nil)).Elem(), "type", + UnionVariant{ + TypeFilter: gjson.String, + Type: reflect.TypeOf(UnionTime{}), + }, + UnionVariant{ + TypeFilter: gjson.Number, + Type: reflect.TypeOf(UnionInteger(0)), + }, + UnionVariant{ + TypeFilter: gjson.JSON, + DiscriminatorValue: "typeA", + Type: reflect.TypeOf(UnionStructA{}), + }, + UnionVariant{ + TypeFilter: gjson.JSON, + DiscriminatorValue: "typeB", + Type: reflect.TypeOf(UnionStructB{}), + }, + ) +} + +type ComplexUnionStruct struct { + Union ComplexUnion `json:"union"` +} + +type ComplexUnion interface { + complexUnion() +} + +type ComplexUnionA struct { + Boo string `json:"boo"` + Foo bool `json:"foo"` +} + +func (ComplexUnionA) complexUnion() {} + +type ComplexUnionB struct { + Boo bool `json:"boo"` + Foo string `json:"foo"` +} + +func (ComplexUnionB) complexUnion() {} + +type ComplexUnionC struct { + Boo int64 `json:"boo"` +} + +func (ComplexUnionC) complexUnion() {} + +type ComplexUnionTypeA struct { + Baz int64 `json:"baz"` + Type TypeA `json:"type"` +} + +func (ComplexUnionTypeA) complexUnion() {} + +type TypeA string + +func (t TypeA) IsKnown() bool { + return t == "a" +} + +type ComplexUnionTypeB struct { + Baz int64 `json:"baz"` + Type TypeB `json:"type"` +} + +type TypeB string + +func (t TypeB) IsKnown() bool { + return t == "b" +} + +type UnmarshalStruct struct { + Foo string `json:"foo"` + prop bool `json:"-"` +} + +func (r *UnmarshalStruct) UnmarshalJSON(json []byte) error { + r.prop = true + return UnmarshalRoot(json, r) +} + +func (ComplexUnionTypeB) complexUnion() {} + +func init() { + RegisterUnion(reflect.TypeOf((*ComplexUnion)(nil)).Elem(), "", + UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(ComplexUnionA{}), + }, + UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(ComplexUnionB{}), + }, + UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(ComplexUnionC{}), + }, + UnionVariant{ + TypeFilter: gjson.JSON, + Type: 
reflect.TypeOf(ComplexUnionTypeA{}), + }, + UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(ComplexUnionTypeB{}), + }, + ) +} + +var tests = map[string]struct { + buf string + val interface{} +}{ + "true": {"true", true}, + "false": {"false", false}, + "int": {"1", 1}, + "int_bigger": {"12324", 12324}, + "int_string_coerce": {`"65"`, 65}, + "int_boolean_coerce": {"true", 1}, + "int64": {"1", int64(1)}, + "int64_huge": {"123456789123456789", int64(123456789123456789)}, + "uint": {"1", uint(1)}, + "uint_bigger": {"12324", uint(12324)}, + "uint_coerce": {`"65"`, uint(65)}, + "float_1.54": {"1.54", float32(1.54)}, + "float_1.89": {"1.89", float64(1.89)}, + "string": {`"str"`, "str"}, + "string_int_coerce": {`12`, "12"}, + "array_string": {`["foo","bar"]`, []string{"foo", "bar"}}, + "array_int": {`[1,2]`, []int{1, 2}}, + "array_int_coerce": {`["1",2]`, []int{1, 2}}, + + "ptr_true": {"true", P(true)}, + "ptr_false": {"false", P(false)}, + "ptr_int": {"1", P(1)}, + "ptr_int_bigger": {"12324", P(12324)}, + "ptr_int_string_coerce": {`"65"`, P(65)}, + "ptr_int_boolean_coerce": {"true", P(1)}, + "ptr_int64": {"1", P(int64(1))}, + "ptr_int64_huge": {"123456789123456789", P(int64(123456789123456789))}, + "ptr_uint": {"1", P(uint(1))}, + "ptr_uint_bigger": {"12324", P(uint(12324))}, + "ptr_uint_coerce": {`"65"`, P(uint(65))}, + "ptr_float_1.54": {"1.54", P(float32(1.54))}, + "ptr_float_1.89": {"1.89", P(float64(1.89))}, + + "date_time": {`"2007-03-01T13:00:00Z"`, time.Date(2007, time.March, 1, 13, 0, 0, 0, time.UTC)}, + "date_time_nano_coerce": {`"2007-03-01T13:03:05.123456789Z"`, time.Date(2007, time.March, 1, 13, 3, 5, 123456789, time.UTC)}, + + "date_time_missing_t_coerce": {`"2007-03-01 13:03:05Z"`, time.Date(2007, time.March, 1, 13, 3, 5, 0, time.UTC)}, + "date_time_missing_timezone_coerce": {`"2007-03-01T13:03:05"`, time.Date(2007, time.March, 1, 13, 3, 5, 0, time.UTC)}, + // note: using -1200 to minimize probability of conflicting with the local timezone of 
the test runner + // see https://en.wikipedia.org/wiki/UTC%E2%88%9212:00 + "date_time_missing_timezone_colon_coerce": {`"2007-03-01T13:03:05-1200"`, time.Date(2007, time.March, 1, 13, 3, 5, 0, time.FixedZone("", -12*60*60))}, + "date_time_nano_missing_t_coerce": {`"2007-03-01 13:03:05.123456789Z"`, time.Date(2007, time.March, 1, 13, 3, 5, 123456789, time.UTC)}, + + "map_string": {`{"foo":"bar"}`, map[string]string{"foo": "bar"}}, + "map_interface": {`{"a":1,"b":"str","c":false}`, map[string]interface{}{"a": float64(1), "b": "str", "c": false}}, + + "primitive_struct": { + `{"a":false,"b":237628372683,"c":654,"d":9999.43,"e":43.76,"f":[1,2,3,4]}`, + Primitives{A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + }, + + "slices": { + `{"slices":[{"a":false,"b":237628372683,"c":654,"d":9999.43,"e":43.76,"f":[1,2,3,4]}]}`, + Slices{ + Slice: []Primitives{{A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}}, + }, + }, + + "primitive_pointer_struct": { + `{"a":false,"b":237628372683,"c":654,"d":9999.43,"e":43.76,"f":[1,2,3,4,5]}`, + PrimitivePointers{ + A: P(false), + B: P(237628372683), + C: P(uint(654)), + D: P(9999.43), + E: P(float32(43.76)), + F: &[]int{1, 2, 3, 4, 5}, + }, + }, + + "datetime_struct": { + `{"date":"2006-01-02","date-time":"2006-01-02T15:04:05Z"}`, + DateTime{ + Date: time.Date(2006, time.January, 2, 0, 0, 0, 0, time.UTC), + DateTime: time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC), + }, + }, + + "additional_properties": { + `{"a":true,"bar":"value","foo":true}`, + AdditionalProperties{ + A: true, + ExtraFields: map[string]interface{}{ + "bar": "value", + "foo": true, + }, + }, + }, + + "embedded_struct": { + `{"a":1,"b":"bar"}`, + EmbeddedStructs{ + EmbeddedStruct: EmbeddedStruct{ + A: true, + B: "bar", + JSON: EmbeddedStructJSON{ + A: Field{raw: `1`, status: valid}, + B: Field{raw: `"bar"`, status: valid}, + raw: `{"a":1,"b":"bar"}`, + }, + }, + A: P(1), + ExtraFields: 
map[string]interface{}{"b": "bar"}, + JSON: EmbeddedStructsJSON{ + A: Field{raw: `1`, status: valid}, + ExtraFields: map[string]Field{ + "b": {raw: `"bar"`, status: valid}, + }, + raw: `{"a":1,"b":"bar"}`, + }, + }, + }, + + "recursive_struct": { + `{"child":{"name":"Alex"},"name":"Robert"}`, + Recursive{Name: "Robert", Child: &Recursive{Name: "Alex"}}, + }, + + "metadata_coerce": { + `{"a":"12","b":"12","c":null,"extra_typed":12,"extra_untyped":{"foo":"bar"}}`, + JSONFieldStruct{ + A: false, + B: 12, + C: "", + JSON: JSONFieldStructJSON{ + raw: `{"a":"12","b":"12","c":null,"extra_typed":12,"extra_untyped":{"foo":"bar"}}`, + A: Field{raw: `"12"`, status: invalid}, + B: Field{raw: `"12"`, status: valid}, + C: Field{raw: "null", status: null}, + D: Field{raw: "", status: missing}, + ExtraFields: map[string]Field{ + "extra_typed": { + raw: "12", + status: valid, + }, + "extra_untyped": { + raw: `{"foo":"bar"}`, + status: invalid, + }, + }, + }, + ExtraFields: map[string]int64{ + "extra_typed": 12, + "extra_untyped": 0, + }, + }, + }, + + "unknown_struct_number": { + `{"unknown":12}`, + UnknownStruct{ + Unknown: 12., + }, + }, + + "unknown_struct_map": { + `{"unknown":{"foo":"bar"}}`, + UnknownStruct{ + Unknown: map[string]interface{}{ + "foo": "bar", + }, + }, + }, + + "union_integer": { + `{"union":12}`, + UnionStruct{ + Union: UnionInteger(12), + }, + }, + + "union_struct_discriminated_a": { + `{"union":{"a":"foo","b":"bar","type":"typeA"}}`, + UnionStruct{ + Union: UnionStructA{ + Type: "typeA", + A: "foo", + B: "bar", + }, + }, + }, + + "union_struct_discriminated_b": { + `{"union":{"a":"foo","type":"typeB"}}`, + UnionStruct{ + Union: UnionStructB{ + Type: "typeB", + A: "foo", + }, + }, + }, + + "union_struct_time": { + `{"union":"2010-05-23"}`, + UnionStruct{ + Union: UnionTime(time.Date(2010, 05, 23, 0, 0, 0, 0, time.UTC)), + }, + }, + + "complex_union_a": { + `{"union":{"boo":"12","foo":true}}`, + ComplexUnionStruct{Union: ComplexUnionA{Boo: "12", Foo: true}}, 
+ }, + + "complex_union_b": { + `{"union":{"boo":true,"foo":"12"}}`, + ComplexUnionStruct{Union: ComplexUnionB{Boo: true, Foo: "12"}}, + }, + + "complex_union_c": { + `{"union":{"boo":12}}`, + ComplexUnionStruct{Union: ComplexUnionC{Boo: 12}}, + }, + + "complex_union_type_a": { + `{"union":{"baz":12,"type":"a"}}`, + ComplexUnionStruct{Union: ComplexUnionTypeA{Baz: 12, Type: TypeA("a")}}, + }, + + "complex_union_type_b": { + `{"union":{"baz":12,"type":"b"}}`, + ComplexUnionStruct{Union: ComplexUnionTypeB{Baz: 12, Type: TypeB("b")}}, + }, + + "unmarshal": { + `{"foo":"hello"}`, + &UnmarshalStruct{Foo: "hello", prop: true}, + }, + + "array_of_unmarshal": { + `[{"foo":"hello"}]`, + []UnmarshalStruct{{Foo: "hello", prop: true}}, + }, + + "inline_coerce": { + `{"a":false,"b":237628372683,"c":654,"d":9999.43,"e":43.76,"f":[1,2,3,4]}`, + Inline{ + InlineField: Primitives{A: false, B: 237628372683, C: 0x28e, D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + JSON: InlineJSON{ + InlineField: Field{raw: "{\"a\":false,\"b\":237628372683,\"c\":654,\"d\":9999.43,\"e\":43.76,\"f\":[1,2,3,4]}", status: 3}, + raw: "{\"a\":false,\"b\":237628372683,\"c\":654,\"d\":9999.43,\"e\":43.76,\"f\":[1,2,3,4]}", + }, + }, + }, + + "inline_array_coerce": { + `["Hello","foo","bar"]`, + InlineArray{ + InlineField: []string{"Hello", "foo", "bar"}, + JSON: InlineJSON{ + InlineField: Field{raw: `["Hello","foo","bar"]`, status: 3}, + raw: `["Hello","foo","bar"]`, + }, + }, + }, +} + +func TestDecode(t *testing.T) { + for name, test := range tests { + t.Run(name, func(t *testing.T) { + result := reflect.New(reflect.TypeOf(test.val)) + if err := Unmarshal([]byte(test.buf), result.Interface()); err != nil { + t.Fatalf("deserialization of %v failed with error %v", result, err) + } + if !reflect.DeepEqual(result.Elem().Interface(), test.val) { + t.Fatalf("expected '%s' to deserialize to \n%#v\nbut got\n%#v", test.buf, test.val, result.Elem().Interface()) + } + }) + } +} + +func TestEncode(t *testing.T) { + 
for name, test := range tests { + if strings.HasSuffix(name, "_coerce") { + continue + } + t.Run(name, func(t *testing.T) { + raw, err := Marshal(test.val) + if err != nil { + t.Fatalf("serialization of %v failed with error %v", test.val, err) + } + if string(raw) != test.buf { + t.Fatalf("expected %+#v to serialize to %s but got %s", test.val, test.buf, string(raw)) + } + }) + } +} diff --git a/internal/apijson/port.go b/internal/apijson/port.go new file mode 100644 index 0000000..80b323b --- /dev/null +++ b/internal/apijson/port.go @@ -0,0 +1,107 @@ +package apijson + +import ( + "fmt" + "reflect" +) + +// Port copies over values from one struct to another struct. +func Port(from any, to any) error { + toVal := reflect.ValueOf(to) + fromVal := reflect.ValueOf(from) + + if toVal.Kind() != reflect.Ptr || toVal.IsNil() { + return fmt.Errorf("destination must be a non-nil pointer") + } + + for toVal.Kind() == reflect.Ptr { + toVal = toVal.Elem() + } + toType := toVal.Type() + + for fromVal.Kind() == reflect.Ptr { + fromVal = fromVal.Elem() + } + fromType := fromVal.Type() + + if toType.Kind() != reflect.Struct { + return fmt.Errorf("destination must be a non-nil pointer to a struct (%v %v)", toType, toType.Kind()) + } + + values := map[string]reflect.Value{} + fields := map[string]reflect.Value{} + + fromJSON := fromVal.FieldByName("JSON") + toJSON := toVal.FieldByName("JSON") + + // First, iterate through the from fields and load all the "normal" fields in the struct to the map of + // string to reflect.Value, as well as their raw .JSON.Foo counterpart. + for i := 0; i < fromType.NumField(); i++ { + field := fromType.Field(i) + ptag, ok := parseJSONStructTag(field) + if !ok { + continue + } + if ptag.name == "-" { + continue + } + values[ptag.name] = fromVal.Field(i) + if fromJSON.IsValid() { + fields[ptag.name] = fromJSON.FieldByName(field.Name) + } + } + + // Use the values from the previous step to populate the 'to' struct. 
+ for i := 0; i < toType.NumField(); i++ { + field := toType.Field(i) + ptag, ok := parseJSONStructTag(field) + if !ok { + continue + } + if ptag.name == "-" { + continue + } + if value, ok := values[ptag.name]; ok { + delete(values, ptag.name) + if field.Type.Kind() == reflect.Interface { + toVal.Field(i).Set(value) + } else { + switch value.Kind() { + case reflect.String: + toVal.Field(i).SetString(value.String()) + case reflect.Bool: + toVal.Field(i).SetBool(value.Bool()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + toVal.Field(i).SetInt(value.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + toVal.Field(i).SetUint(value.Uint()) + case reflect.Float32, reflect.Float64: + toVal.Field(i).SetFloat(value.Float()) + default: + toVal.Field(i).Set(value) + } + } + } + + if fromJSONField, ok := fields[ptag.name]; ok { + if toJSONField := toJSON.FieldByName(field.Name); toJSONField.IsValid() { + toJSONField.Set(fromJSONField) + } + } + } + + // Finally, copy over the .JSON.raw and .JSON.ExtraFields + if toJSON.IsValid() { + if raw := toJSON.FieldByName("raw"); raw.IsValid() { + setUnexportedField(raw, fromJSON.Interface().(interface{ RawJSON() string }).RawJSON()) + } + + if toExtraFields := toJSON.FieldByName("ExtraFields"); toExtraFields.IsValid() { + if fromExtraFields := fromJSON.FieldByName("ExtraFields"); fromExtraFields.IsValid() { + setUnexportedField(toExtraFields, fromExtraFields.Interface()) + } + } + } + + return nil +} diff --git a/internal/apijson/port_test.go b/internal/apijson/port_test.go new file mode 100644 index 0000000..f9b6e3f --- /dev/null +++ b/internal/apijson/port_test.go @@ -0,0 +1,178 @@ +package apijson + +import ( + "reflect" + "testing" +) + +type Metadata struct { + CreatedAt string `json:"created_at"` +} + +// Card is the "combined" type of CardVisa and CardMastercard +type Card struct { + Processor CardProcessor `json:"processor"` + Data any `json:"data"` + 
IsFoo bool `json:"is_foo"` + IsBar bool `json:"is_bar"` + Metadata Metadata `json:"metadata"` + Value interface{} `json:"value"` + + JSON cardJSON +} + +type cardJSON struct { + Processor Field + Data Field + IsFoo Field + IsBar Field + Metadata Field + Value Field + ExtraFields map[string]Field + raw string +} + +func (r cardJSON) RawJSON() string { return r.raw } + +type CardProcessor string + +// CardVisa +type CardVisa struct { + Processor CardVisaProcessor `json:"processor"` + Data CardVisaData `json:"data"` + IsFoo bool `json:"is_foo"` + Metadata Metadata `json:"metadata"` + Value string `json:"value"` + + JSON cardVisaJSON +} + +type cardVisaJSON struct { + Processor Field + Data Field + IsFoo Field + Metadata Field + Value Field + ExtraFields map[string]Field + raw string +} + +func (r cardVisaJSON) RawJSON() string { return r.raw } + +type CardVisaProcessor string + +type CardVisaData struct { + Foo string `json:"foo"` +} + +// CardMastercard +type CardMastercard struct { + Processor CardMastercardProcessor `json:"processor"` + Data CardMastercardData `json:"data"` + IsBar bool `json:"is_bar"` + Metadata Metadata `json:"metadata"` + Value bool `json:"value"` + + JSON cardMastercardJSON +} + +type cardMastercardJSON struct { + Processor Field + Data Field + IsBar Field + Metadata Field + Value Field + ExtraFields map[string]Field + raw string +} + +func (r cardMastercardJSON) RawJSON() string { return r.raw } + +type CardMastercardProcessor string + +type CardMastercardData struct { + Bar int64 `json:"bar"` +} + +var portTests = map[string]struct { + from any + to any +}{ + "visa to card": { + CardVisa{ + Processor: "visa", + IsFoo: true, + Data: CardVisaData{ + Foo: "foo", + }, + Metadata: Metadata{ + CreatedAt: "Mar 29 2024", + }, + Value: "value", + JSON: cardVisaJSON{ + raw: `{"processor":"visa","is_foo":true,"data":{"foo":"foo"}}`, + Processor: Field{raw: `"visa"`, status: valid}, + IsFoo: Field{raw: `true`, status: valid}, + Data: Field{raw: 
`{"foo":"foo"}`, status: valid}, + Value: Field{raw: `"value"`, status: valid}, + ExtraFields: map[string]Field{"extra": {raw: `"yo"`, status: valid}}, + }, + }, + Card{ + Processor: "visa", + IsFoo: true, + IsBar: false, + Data: CardVisaData{ + Foo: "foo", + }, + Metadata: Metadata{ + CreatedAt: "Mar 29 2024", + }, + Value: "value", + JSON: cardJSON{ + raw: `{"processor":"visa","is_foo":true,"data":{"foo":"foo"}}`, + Processor: Field{raw: `"visa"`, status: valid}, + IsFoo: Field{raw: `true`, status: valid}, + Data: Field{raw: `{"foo":"foo"}`, status: valid}, + Value: Field{raw: `"value"`, status: valid}, + ExtraFields: map[string]Field{"extra": {raw: `"yo"`, status: valid}}, + }, + }, + }, + "mastercard to card": { + CardMastercard{ + Processor: "mastercard", + IsBar: true, + Data: CardMastercardData{ + Bar: 13, + }, + Value: false, + }, + Card{ + Processor: "mastercard", + IsFoo: false, + IsBar: true, + Data: CardMastercardData{ + Bar: 13, + }, + Value: false, + }, + }, +} + +func TestPort(t *testing.T) { + for name, test := range portTests { + t.Run(name, func(t *testing.T) { + toVal := reflect.New(reflect.TypeOf(test.to)) + + err := Port(test.from, toVal.Interface()) + if err != nil { + t.Fatalf("port of %v failed with error %v", test.from, err) + } + + if !reflect.DeepEqual(toVal.Elem().Interface(), test.to) { + t.Fatalf("expected:\n%+#v\n\nto port to:\n%+#v\n\nbut got:\n%+#v", test.from, test.to, toVal.Elem().Interface()) + } + }) + } +} diff --git a/internal/apijson/registry.go b/internal/apijson/registry.go new file mode 100644 index 0000000..fcc518b --- /dev/null +++ b/internal/apijson/registry.go @@ -0,0 +1,27 @@ +package apijson + +import ( + "reflect" + + "github.com/tidwall/gjson" +) + +type UnionVariant struct { + TypeFilter gjson.Type + DiscriminatorValue interface{} + Type reflect.Type +} + +var unionRegistry = map[reflect.Type]unionEntry{} + +type unionEntry struct { + discriminatorKey string + variants []UnionVariant +} + +func RegisterUnion(typ 
reflect.Type, discriminator string, variants ...UnionVariant) { + unionRegistry[typ] = unionEntry{ + discriminatorKey: discriminator, + variants: variants, + } +} diff --git a/internal/apijson/tag.go b/internal/apijson/tag.go new file mode 100644 index 0000000..812fb3c --- /dev/null +++ b/internal/apijson/tag.go @@ -0,0 +1,47 @@ +package apijson + +import ( + "reflect" + "strings" +) + +const jsonStructTag = "json" +const formatStructTag = "format" + +type parsedStructTag struct { + name string + required bool + extras bool + metadata bool + inline bool +} + +func parseJSONStructTag(field reflect.StructField) (tag parsedStructTag, ok bool) { + raw, ok := field.Tag.Lookup(jsonStructTag) + if !ok { + return + } + parts := strings.Split(raw, ",") + if len(parts) == 0 { + return tag, false + } + tag.name = parts[0] + for _, part := range parts[1:] { + switch part { + case "required": + tag.required = true + case "extras": + tag.extras = true + case "metadata": + tag.metadata = true + case "inline": + tag.inline = true + } + } + return +} + +func parseFormatStructTag(field reflect.StructField) (format string, ok bool) { + format, ok = field.Tag.Lookup(formatStructTag) + return +} diff --git a/internal/apiquery/encoder.go b/internal/apiquery/encoder.go new file mode 100644 index 0000000..ed8c4a2 --- /dev/null +++ b/internal/apiquery/encoder.go @@ -0,0 +1,341 @@ +package apiquery + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "github.com/openai/openai-go/internal/param" +) + +var encoders sync.Map // map[reflect.Type]encoderFunc + +type encoder struct { + dateFormat string + root bool + settings QuerySettings +} + +type encoderFunc func(key string, value reflect.Value) []Pair + +type encoderField struct { + tag parsedStructTag + fn encoderFunc + idx []int +} + +type encoderEntry struct { + reflect.Type + dateFormat string + root bool + settings QuerySettings +} + +type Pair struct { + key string + value string +} + +func (e 
*encoder) typeEncoder(t reflect.Type) encoderFunc { + entry := encoderEntry{ + Type: t, + dateFormat: e.dateFormat, + root: e.root, + settings: e.settings, + } + + if fi, ok := encoders.Load(entry); ok { + return fi.(encoderFunc) + } + + // To deal with recursive types, populate the map with an + // indirect func before we build it. This type waits on the + // real func (f) to be ready and then calls it. This indirect + // func is only used for recursive types. + var ( + wg sync.WaitGroup + f encoderFunc + ) + wg.Add(1) + fi, loaded := encoders.LoadOrStore(entry, encoderFunc(func(key string, v reflect.Value) []Pair { + wg.Wait() + return f(key, v) + })) + if loaded { + return fi.(encoderFunc) + } + + // Compute the real encoder and replace the indirect func with it. + f = e.newTypeEncoder(t) + wg.Done() + encoders.Store(entry, f) + return f +} + +func marshalerEncoder(key string, value reflect.Value) []Pair { + s, _ := value.Interface().(json.Marshaler).MarshalJSON() + return []Pair{{key, string(s)}} +} + +func (e *encoder) newTypeEncoder(t reflect.Type) encoderFunc { + if t.ConvertibleTo(reflect.TypeOf(time.Time{})) { + return e.newTimeTypeEncoder(t) + } + if !e.root && t.Implements(reflect.TypeOf((*json.Marshaler)(nil)).Elem()) { + return marshalerEncoder + } + e.root = false + switch t.Kind() { + case reflect.Pointer: + encoder := e.typeEncoder(t.Elem()) + return func(key string, value reflect.Value) (pairs []Pair) { + if !value.IsValid() || value.IsNil() { + return + } + pairs = encoder(key, value.Elem()) + return + } + case reflect.Struct: + return e.newStructTypeEncoder(t) + case reflect.Array: + fallthrough + case reflect.Slice: + return e.newArrayTypeEncoder(t) + case reflect.Map: + return e.newMapEncoder(t) + case reflect.Interface: + return e.newInterfaceEncoder() + default: + return e.newPrimitiveTypeEncoder(t) + } +} + +func (e *encoder) newStructTypeEncoder(t reflect.Type) encoderFunc { + if t.Implements(reflect.TypeOf((*param.FieldLike)(nil)).Elem()) 
{ + return e.newFieldTypeEncoder(t) + } + + encoderFields := []encoderField{} + + // This helper allows us to recursively collect field encoders into a flat + // array. The parameter `index` keeps track of the access patterns necessary + // to get to some field. + var collectEncoderFields func(r reflect.Type, index []int) + collectEncoderFields = func(r reflect.Type, index []int) { + for i := 0; i < r.NumField(); i++ { + idx := append(index, i) + field := t.FieldByIndex(idx) + if !field.IsExported() { + continue + } + // If this is an embedded struct, traverse one level deeper to extract + // the field and get their encoders as well. + if field.Anonymous { + collectEncoderFields(field.Type, idx) + continue + } + // If query tag is not present, then we skip, which is intentionally + // different behavior from the stdlib. + ptag, ok := parseQueryStructTag(field) + if !ok { + continue + } + + if ptag.name == "-" && !ptag.inline { + continue + } + + dateFormat, ok := parseFormatStructTag(field) + oldFormat := e.dateFormat + if ok { + switch dateFormat { + case "date-time": + e.dateFormat = time.RFC3339 + case "date": + e.dateFormat = "2006-01-02" + } + } + encoderFields = append(encoderFields, encoderField{ptag, e.typeEncoder(field.Type), idx}) + e.dateFormat = oldFormat + } + } + collectEncoderFields(t, []int{}) + + return func(key string, value reflect.Value) (pairs []Pair) { + for _, ef := range encoderFields { + var subkey string = e.renderKeyPath(key, ef.tag.name) + if ef.tag.inline { + subkey = key + } + + field := value.FieldByIndex(ef.idx) + pairs = append(pairs, ef.fn(subkey, field)...) 
+ } + return + } +} + +func (e *encoder) newMapEncoder(t reflect.Type) encoderFunc { + keyEncoder := e.typeEncoder(t.Key()) + elementEncoder := e.typeEncoder(t.Elem()) + return func(key string, value reflect.Value) (pairs []Pair) { + iter := value.MapRange() + for iter.Next() { + encodedKey := keyEncoder("", iter.Key()) + if len(encodedKey) != 1 { + panic("Unexpected number of parts for encoded map key. Are you using a non-primitive for this map?") + } + subkey := encodedKey[0].value + keyPath := e.renderKeyPath(key, subkey) + pairs = append(pairs, elementEncoder(keyPath, iter.Value())...) + } + return + } +} + +func (e *encoder) renderKeyPath(key string, subkey string) string { + if len(key) == 0 { + return subkey + } + if e.settings.NestedFormat == NestedQueryFormatDots { + return fmt.Sprintf("%s.%s", key, subkey) + } + return fmt.Sprintf("%s[%s]", key, subkey) +} + +func (e *encoder) newArrayTypeEncoder(t reflect.Type) encoderFunc { + switch e.settings.ArrayFormat { + case ArrayQueryFormatComma: + innerEncoder := e.typeEncoder(t.Elem()) + return func(key string, v reflect.Value) []Pair { + elements := []string{} + for i := 0; i < v.Len(); i++ { + for _, pair := range innerEncoder("", v.Index(i)) { + elements = append(elements, pair.value) + } + } + if len(elements) == 0 { + return []Pair{} + } + return []Pair{{key, strings.Join(elements, ",")}} + } + case ArrayQueryFormatRepeat: + innerEncoder := e.typeEncoder(t.Elem()) + return func(key string, value reflect.Value) (pairs []Pair) { + for i := 0; i < value.Len(); i++ { + pairs = append(pairs, innerEncoder(key, value.Index(i))...) + } + return pairs + } + case ArrayQueryFormatIndices: + panic("The array indices format is not supported yet") + case ArrayQueryFormatBrackets: + innerEncoder := e.typeEncoder(t.Elem()) + return func(key string, value reflect.Value) []Pair { + pairs := []Pair{} + for i := 0; i < value.Len(); i++ { + pairs = append(pairs, innerEncoder(key+"[]", value.Index(i))...) 
+ } + return pairs + } + default: + panic(fmt.Sprintf("Unknown ArrayFormat value: %d", e.settings.ArrayFormat)) + } +} + +func (e *encoder) newPrimitiveTypeEncoder(t reflect.Type) encoderFunc { + switch t.Kind() { + case reflect.Pointer: + inner := t.Elem() + + innerEncoder := e.newPrimitiveTypeEncoder(inner) + return func(key string, v reflect.Value) []Pair { + if !v.IsValid() || v.IsNil() { + return nil + } + return innerEncoder(key, v.Elem()) + } + case reflect.String: + return func(key string, v reflect.Value) []Pair { + return []Pair{{key, v.String()}} + } + case reflect.Bool: + return func(key string, v reflect.Value) []Pair { + if v.Bool() { + return []Pair{{key, "true"}} + } + return []Pair{{key, "false"}} + } + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + return func(key string, v reflect.Value) []Pair { + return []Pair{{key, strconv.FormatInt(v.Int(), 10)}} + } + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return func(key string, v reflect.Value) []Pair { + return []Pair{{key, strconv.FormatUint(v.Uint(), 10)}} + } + case reflect.Float32, reflect.Float64: + return func(key string, v reflect.Value) []Pair { + return []Pair{{key, strconv.FormatFloat(v.Float(), 'f', -1, 64)}} + } + case reflect.Complex64, reflect.Complex128: + bitSize := 64 + if t.Kind() == reflect.Complex128 { + bitSize = 128 + } + return func(key string, v reflect.Value) []Pair { + return []Pair{{key, strconv.FormatComplex(v.Complex(), 'f', -1, bitSize)}} + } + default: + return func(key string, v reflect.Value) []Pair { + return nil + } + } +} + +func (e *encoder) newFieldTypeEncoder(t reflect.Type) encoderFunc { + f, _ := t.FieldByName("Value") + enc := e.typeEncoder(f.Type) + + return func(key string, value reflect.Value) []Pair { + present := value.FieldByName("Present") + if !present.Bool() { + return nil + } + null := value.FieldByName("Null") + if null.Bool() { + // TODO: Error? 
+ return nil + } + raw := value.FieldByName("Raw") + if !raw.IsNil() { + return e.typeEncoder(raw.Type())(key, raw) + } + return enc(key, value.FieldByName("Value")) + } +} + +func (e *encoder) newTimeTypeEncoder(t reflect.Type) encoderFunc { + format := e.dateFormat + return func(key string, value reflect.Value) []Pair { + return []Pair{{ + key, + value.Convert(reflect.TypeOf(time.Time{})).Interface().(time.Time).Format(format), + }} + } +} + +func (e encoder) newInterfaceEncoder() encoderFunc { + return func(key string, value reflect.Value) []Pair { + value = value.Elem() + if !value.IsValid() { + return nil + } + return e.typeEncoder(value.Type())(key, value) + } + +} diff --git a/internal/apiquery/query.go b/internal/apiquery/query.go new file mode 100644 index 0000000..6f90e99 --- /dev/null +++ b/internal/apiquery/query.go @@ -0,0 +1,50 @@ +package apiquery + +import ( + "net/url" + "reflect" + "time" +) + +func MarshalWithSettings(value interface{}, settings QuerySettings) url.Values { + e := encoder{time.RFC3339, true, settings} + kv := url.Values{} + val := reflect.ValueOf(value) + if !val.IsValid() { + return nil + } + typ := val.Type() + for _, pair := range e.typeEncoder(typ)("", val) { + kv.Add(pair.key, pair.value) + } + return kv +} + +func Marshal(value interface{}) url.Values { + return MarshalWithSettings(value, QuerySettings{}) +} + +type Queryer interface { + URLQuery() url.Values +} + +type QuerySettings struct { + NestedFormat NestedQueryFormat + ArrayFormat ArrayQueryFormat +} + +type NestedQueryFormat int + +const ( + NestedQueryFormatBrackets NestedQueryFormat = iota + NestedQueryFormatDots +) + +type ArrayQueryFormat int + +const ( + ArrayQueryFormatComma ArrayQueryFormat = iota + ArrayQueryFormatRepeat + ArrayQueryFormatIndices + ArrayQueryFormatBrackets +) diff --git a/internal/apiquery/query_test.go b/internal/apiquery/query_test.go new file mode 100644 index 0000000..1e740d6 --- /dev/null +++ b/internal/apiquery/query_test.go @@ -0,0 
// P returns a pointer to a copy of v; handy for building pointer-typed
// fields in test fixtures.
func P[T any](v T) *T {
	p := new(T)
	*p = v
	return p
}
237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + QuerySettings{}, + }, + + "slices_brackets": { + `mixed[]=1&mixed[]=2.3&mixed[]=hello&slices[][a]=false&slices[][a]=false&slices[][b]=237628372683&slices[][b]=237628372683&slices[][c]=654&slices[][c]=654&slices[][d]=9999.43&slices[][d]=9999.43&slices[][e]=43.7599983215332&slices[][e]=43.7599983215332&slices[][f][]=1&slices[][f][]=2&slices[][f][]=3&slices[][f][]=4&slices[][f][]=1&slices[][f][]=2&slices[][f][]=3&slices[][f][]=4`, + Slices{ + Slice: []Primitives{ + {A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + {A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + }, + Mixed: []interface{}{1, 2.3, "hello"}, + }, + QuerySettings{ArrayFormat: ArrayQueryFormatBrackets}, + }, + + "slices_comma": { + `mixed=1,2.3,hello`, + Slices{ + Mixed: []interface{}{1, 2.3, "hello"}, + }, + QuerySettings{ArrayFormat: ArrayQueryFormatComma}, + }, + + "slices_repeat": { + `mixed=1&mixed=2.3&mixed=hello&slices[a]=false&slices[a]=false&slices[b]=237628372683&slices[b]=237628372683&slices[c]=654&slices[c]=654&slices[d]=9999.43&slices[d]=9999.43&slices[e]=43.7599983215332&slices[e]=43.7599983215332&slices[f]=1&slices[f]=2&slices[f]=3&slices[f]=4&slices[f]=1&slices[f]=2&slices[f]=3&slices[f]=4`, + Slices{ + Slice: []Primitives{ + {A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + {A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + }, + Mixed: []interface{}{1, 2.3, "hello"}, + }, + QuerySettings{ArrayFormat: ArrayQueryFormatRepeat}, + }, + + "primitive_pointer_struct": { + "a=false&b=237628372683&c=654&d=9999.43&e=43.7599983215332&f=1,2,3,4,5", + PrimitivePointers{ + A: P(false), + B: P(237628372683), + C: P(uint(654)), + D: P(9999.43), + E: P(float32(43.76)), + F: &[]int{1, 2, 3, 4, 5}, + }, + QuerySettings{}, + }, + + "datetime_struct": { + 
`date=2006-01-02&date-time=2006-01-02T15:04:05Z`, + DateTime{ + Date: time.Date(2006, time.January, 2, 0, 0, 0, 0, time.UTC), + DateTime: time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC), + }, + QuerySettings{}, + }, + + "additional_properties": { + `a=true&bar=value&foo=true`, + AdditionalProperties{ + A: true, + Extras: map[string]interface{}{ + "bar": "value", + "foo": true, + }, + }, + QuerySettings{}, + }, + + "recursive_struct_brackets": { + `child[name]=Alex&name=Robert`, + Recursive{Name: "Robert", Child: &Recursive{Name: "Alex"}}, + QuerySettings{NestedFormat: NestedQueryFormatBrackets}, + }, + + "recursive_struct_dots": { + `child.name=Alex&name=Robert`, + Recursive{Name: "Robert", Child: &Recursive{Name: "Alex"}}, + QuerySettings{NestedFormat: NestedQueryFormatDots}, + }, + + "unknown_struct_number": { + `unknown=12`, + UnknownStruct{ + Unknown: 12., + }, + QuerySettings{}, + }, + + "unknown_struct_map_brackets": { + `unknown[foo]=bar`, + UnknownStruct{ + Unknown: map[string]interface{}{ + "foo": "bar", + }, + }, + QuerySettings{NestedFormat: NestedQueryFormatBrackets}, + }, + + "unknown_struct_map_dots": { + `unknown.foo=bar`, + UnknownStruct{ + Unknown: map[string]interface{}{ + "foo": "bar", + }, + }, + QuerySettings{NestedFormat: NestedQueryFormatDots}, + }, + + "union_string": { + `union=hello`, + UnionStruct{ + Union: UnionString("hello"), + }, + QuerySettings{}, + }, + + "union_integer": { + `union=12`, + UnionStruct{ + Union: UnionInteger(12), + }, + QuerySettings{}, + }, + + "union_struct_discriminated_a": { + `union[a]=foo&union[b]=bar&union[type]=typeA`, + UnionStruct{ + Union: UnionStructA{ + Type: "typeA", + A: "foo", + B: "bar", + }, + }, + QuerySettings{}, + }, + + "union_struct_discriminated_b": { + `union[a]=foo&union[type]=typeB`, + UnionStruct{ + Union: UnionStructB{ + Type: "typeB", + A: "foo", + }, + }, + QuerySettings{}, + }, + + "union_struct_time": { + `union=2010-05-23`, + UnionStruct{ + Union: UnionTime(time.Date(2010, 
05, 23, 0, 0, 0, 0, time.UTC)), + }, + QuerySettings{}, + }, + + "deeply_nested_brackets": { + `a[b][c][d]=hello`, + DeeplyNested{ + A: DeeplyNested1{ + B: DeeplyNested2{ + C: DeeplyNested3{ + D: P("hello"), + }, + }, + }, + }, + QuerySettings{NestedFormat: NestedQueryFormatBrackets}, + }, + + "deeply_nested_dots": { + `a.b.c.d=hello`, + DeeplyNested{ + A: DeeplyNested1{ + B: DeeplyNested2{ + C: DeeplyNested3{ + D: P("hello"), + }, + }, + }, + }, + QuerySettings{NestedFormat: NestedQueryFormatDots}, + }, + + "deeply_nested_brackets_empty": { + ``, + DeeplyNested{ + A: DeeplyNested1{ + B: DeeplyNested2{ + C: DeeplyNested3{ + D: nil, + }, + }, + }, + }, + QuerySettings{NestedFormat: NestedQueryFormatBrackets}, + }, + + "deeply_nested_dots_empty": { + ``, + DeeplyNested{ + A: DeeplyNested1{ + B: DeeplyNested2{ + C: DeeplyNested3{ + D: nil, + }, + }, + }, + }, + QuerySettings{NestedFormat: NestedQueryFormatDots}, + }, +} + +func TestEncode(t *testing.T) { + for name, test := range tests { + t.Run(name, func(t *testing.T) { + values := MarshalWithSettings(test.val, test.settings) + str, _ := url.QueryUnescape(values.Encode()) + if str != test.enc { + t.Fatalf("expected %+#v to serialize to %s but got %s", test.val, test.enc, str) + } + }) + } +} diff --git a/internal/apiquery/tag.go b/internal/apiquery/tag.go new file mode 100644 index 0000000..7ccd739 --- /dev/null +++ b/internal/apiquery/tag.go @@ -0,0 +1,41 @@ +package apiquery + +import ( + "reflect" + "strings" +) + +const queryStructTag = "query" +const formatStructTag = "format" + +type parsedStructTag struct { + name string + omitempty bool + inline bool +} + +func parseQueryStructTag(field reflect.StructField) (tag parsedStructTag, ok bool) { + raw, ok := field.Tag.Lookup(queryStructTag) + if !ok { + return + } + parts := strings.Split(raw, ",") + if len(parts) == 0 { + return tag, false + } + tag.name = parts[0] + for _, part := range parts[1:] { + switch part { + case "omitempty": + tag.omitempty = true + 
case "inline": + tag.inline = true + } + } + return +} + +func parseFormatStructTag(field reflect.StructField) (format string, ok bool) { + format, ok = field.Tag.Lookup(formatStructTag) + return +} diff --git a/internal/pagination/pagination.go b/internal/pagination/pagination.go new file mode 100644 index 0000000..d41f647 --- /dev/null +++ b/internal/pagination/pagination.go @@ -0,0 +1,206 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package pagination + +import ( + "net/http" + "reflect" + + "github.com/openai/openai-go/internal/apijson" + "github.com/openai/openai-go/internal/requestconfig" + "github.com/openai/openai-go/option" +) + +type Page[T any] struct { + Data []T `json:"data"` + Object string `json:"object,required"` + JSON pageJSON `json:"-"` + cfg *requestconfig.RequestConfig + res *http.Response +} + +// pageJSON contains the JSON metadata for the struct [Page[T]] +type pageJSON struct { + Data apijson.Field + Object apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *Page[T]) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r pageJSON) RawJSON() string { + return r.raw +} + +// NextPage returns the next page as defined by this pagination style. When there +// is no next page, this function will return a 'nil' for the page value, but will +// not return an error +func (r *Page[T]) GetNextPage() (res *Page[T], err error) { + // This page represents a response that isn't actually paginated at the API level + // so there will never be a next page. 
+ cfg := (*requestconfig.RequestConfig)(nil) + if cfg == nil { + return nil, nil + } + var raw *http.Response + cfg.ResponseInto = &raw + cfg.ResponseBodyInto = &res + err = cfg.Execute() + if err != nil { + return nil, err + } + res.SetPageConfig(cfg, raw) + return res, nil +} + +func (r *Page[T]) SetPageConfig(cfg *requestconfig.RequestConfig, res *http.Response) { + if r == nil { + r = &Page[T]{} + } + r.cfg = cfg + r.res = res +} + +type PageAutoPager[T any] struct { + page *Page[T] + cur T + idx int + run int + err error +} + +func NewPageAutoPager[T any](page *Page[T], err error) *PageAutoPager[T] { + return &PageAutoPager[T]{ + page: page, + err: err, + } +} + +func (r *PageAutoPager[T]) Next() bool { + if r.page == nil || len(r.page.Data) == 0 { + return false + } + if r.idx >= len(r.page.Data) { + r.idx = 0 + r.page, r.err = r.page.GetNextPage() + if r.err != nil || r.page == nil || len(r.page.Data) == 0 { + return false + } + } + r.cur = r.page.Data[r.idx] + r.run += 1 + r.idx += 1 + return true +} + +func (r *PageAutoPager[T]) Current() T { + return r.cur +} + +func (r *PageAutoPager[T]) Err() error { + return r.err +} + +func (r *PageAutoPager[T]) Index() int { + return r.run +} + +type CursorPage[T any] struct { + Data []T `json:"data"` + JSON cursorPageJSON `json:"-"` + cfg *requestconfig.RequestConfig + res *http.Response +} + +// cursorPageJSON contains the JSON metadata for the struct [CursorPage[T]] +type cursorPageJSON struct { + Data apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *CursorPage[T]) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r cursorPageJSON) RawJSON() string { + return r.raw +} + +// NextPage returns the next page as defined by this pagination style. 
When there +// is no next page, this function will return a 'nil' for the page value, but will +// not return an error +func (r *CursorPage[T]) GetNextPage() (res *CursorPage[T], err error) { + items := r.Data + if items == nil || len(items) == 0 { + return nil, nil + } + cfg := r.cfg.Clone(r.cfg.Context) + value := reflect.ValueOf(items[len(items)-1]) + field := value.FieldByName("ID") + cfg.Apply(option.WithQuery("after", field.Interface().(string))) + var raw *http.Response + cfg.ResponseInto = &raw + cfg.ResponseBodyInto = &res + err = cfg.Execute() + if err != nil { + return nil, err + } + res.SetPageConfig(cfg, raw) + return res, nil +} + +func (r *CursorPage[T]) SetPageConfig(cfg *requestconfig.RequestConfig, res *http.Response) { + if r == nil { + r = &CursorPage[T]{} + } + r.cfg = cfg + r.res = res +} + +type CursorPageAutoPager[T any] struct { + page *CursorPage[T] + cur T + idx int + run int + err error +} + +func NewCursorPageAutoPager[T any](page *CursorPage[T], err error) *CursorPageAutoPager[T] { + return &CursorPageAutoPager[T]{ + page: page, + err: err, + } +} + +func (r *CursorPageAutoPager[T]) Next() bool { + if r.page == nil || len(r.page.Data) == 0 { + return false + } + if r.idx >= len(r.page.Data) { + r.idx = 0 + r.page, r.err = r.page.GetNextPage() + if r.err != nil || r.page == nil || len(r.page.Data) == 0 { + return false + } + } + r.cur = r.page.Data[r.idx] + r.run += 1 + r.idx += 1 + return true +} + +func (r *CursorPageAutoPager[T]) Current() T { + return r.cur +} + +func (r *CursorPageAutoPager[T]) Err() error { + return r.err +} + +func (r *CursorPageAutoPager[T]) Index() int { + return r.run +} diff --git a/internal/param/field.go b/internal/param/field.go new file mode 100644 index 0000000..4d0fd9c --- /dev/null +++ b/internal/param/field.go @@ -0,0 +1,29 @@ +package param + +import ( + "fmt" +) + +type FieldLike interface{ field() } + +// Field is a wrapper used for all values sent to the API, +// to distinguish zero values from 
null or omitted fields. +// +// It also allows sending arbitrary deserializable values. +// +// To instantiate a Field, use the helpers exported from +// the package root: `F()`, `Null()`, `Raw()`, etc. +type Field[T any] struct { + FieldLike + Value T + Null bool + Present bool + Raw any +} + +func (f Field[T]) String() string { + if s, ok := any(f.Value).(fmt.Stringer); ok { + return s.String() + } + return fmt.Sprintf("%v", f.Value) +} diff --git a/internal/requestconfig/requestconfig.go b/internal/requestconfig/requestconfig.go new file mode 100644 index 0000000..3b5a114 --- /dev/null +++ b/internal/requestconfig/requestconfig.go @@ -0,0 +1,487 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package requestconfig + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "math" + "math/rand" + "net/http" + "net/url" + "runtime" + "strconv" + "strings" + "time" + + "github.com/openai/openai-go/internal" + "github.com/openai/openai-go/internal/apierror" + "github.com/openai/openai-go/internal/apiform" + "github.com/openai/openai-go/internal/apiquery" +) + +func getDefaultHeaders() map[string]string { + return map[string]string{ + "User-Agent": fmt.Sprintf("OpenAI/Go %s", internal.PackageVersion), + } +} + +func getNormalizedOS() string { + switch runtime.GOOS { + case "ios": + return "iOS" + case "android": + return "Android" + case "darwin": + return "MacOS" + case "window": + return "Windows" + case "freebsd": + return "FreeBSD" + case "openbsd": + return "OpenBSD" + case "linux": + return "Linux" + default: + return fmt.Sprintf("Other:%s", runtime.GOOS) + } +} + +func getNormalizedArchitecture() string { + switch runtime.GOARCH { + case "386": + return "x32" + case "amd64": + return "x64" + case "arm": + return "arm" + case "arm64": + return "arm64" + default: + return fmt.Sprintf("other:%s", runtime.GOARCH) + } +} + +func getPlatformProperties() map[string]string { + return map[string]string{ + 
"X-Stainless-Lang": "go", + "X-Stainless-Package-Version": internal.PackageVersion, + "X-Stainless-OS": getNormalizedOS(), + "X-Stainless-Arch": getNormalizedArchitecture(), + "X-Stainless-Runtime": "go", + "X-Stainless-Runtime-Version": runtime.Version(), + } +} + +func NewRequestConfig(ctx context.Context, method string, u string, body interface{}, dst interface{}, opts ...func(*RequestConfig) error) (*RequestConfig, error) { + var reader io.Reader + + contentType := "application/json" + hasSerializationFunc := false + + if body, ok := body.(json.Marshaler); ok { + content, err := body.MarshalJSON() + if err != nil { + return nil, err + } + reader = bytes.NewBuffer(content) + hasSerializationFunc = true + } + if body, ok := body.(apiform.Marshaler); ok { + var ( + content []byte + err error + ) + content, contentType, err = body.MarshalMultipart() + if err != nil { + return nil, err + } + reader = bytes.NewBuffer(content) + hasSerializationFunc = true + } + if body, ok := body.(apiquery.Queryer); ok { + hasSerializationFunc = true + params := body.URLQuery().Encode() + if params != "" { + u = u + "?" + params + } + } + if body, ok := body.([]byte); ok { + reader = bytes.NewBuffer(body) + hasSerializationFunc = true + } + if body, ok := body.(io.Reader); ok { + reader = body + hasSerializationFunc = true + } + + // Fallback to json serialization if none of the serialization functions that we expect + // to see is present. 
+ if body != nil && !hasSerializationFunc { + content, err := json.Marshal(body) + if err != nil { + return nil, err + } + reader = bytes.NewBuffer(content) + } + + req, err := http.NewRequestWithContext(ctx, method, u, nil) + if err != nil { + return nil, err + } + if reader != nil { + req.Header.Set("Content-Type", contentType) + } + + req.Header.Set("Accept", "application/json") + for k, v := range getDefaultHeaders() { + req.Header.Add(k, v) + } + + for k, v := range getPlatformProperties() { + req.Header.Add(k, v) + } + cfg := RequestConfig{ + MaxRetries: 2, + Context: ctx, + Request: req, + HTTPClient: http.DefaultClient, + Body: reader, + } + cfg.ResponseBodyInto = dst + err = cfg.Apply(opts...) + if err != nil { + return nil, err + } + return &cfg, nil +} + +// RequestConfig represents all the state related to one request. +// +// Editing the variables inside RequestConfig directly is unstable api. Prefer +// composing func(\*RequestConfig) error instead if possible. +type RequestConfig struct { + MaxRetries int + RequestTimeout time.Duration + Context context.Context + Request *http.Request + BaseURL *url.URL + HTTPClient *http.Client + Middlewares []middleware + APIKey string + Organization string + Project string + // If ResponseBodyInto not nil, then we will attempt to deserialize into + // ResponseBodyInto. If Destination is a []byte, then it will return the body as + // is. + ResponseBodyInto interface{} + // ResponseInto copies the \*http.Response of the corresponding request into the + // given address + ResponseInto **http.Response + Body io.Reader +} + +// middleware is exactly the same type as the Middleware type found in the [option] package, +// but it is redeclared here for circular dependency issues. +type middleware = func(*http.Request, middlewareNext) (*http.Response, error) + +// middlewareNext is exactly the same type as the MiddlewareNext type found in the [option] package, +// but it is redeclared here for circular dependency issues. 
+type middlewareNext = func(*http.Request) (*http.Response, error) + +func applyMiddleware(middleware middleware, next middlewareNext) middlewareNext { + return func(req *http.Request) (res *http.Response, err error) { + return middleware(req, next) + } +} + +func shouldRetry(req *http.Request, res *http.Response) bool { + // If there is no way to recover the Body, then we shouldn't retry. + if req.Body != nil && req.GetBody == nil { + return false + } + + // If there is no response, that indicates that there is a connection error + // so we retry the request. + if res == nil { + return true + } + + // If the header explictly wants a retry behavior, respect that over the + // http status code. + if res.Header.Get("x-should-retry") == "true" { + return true + } + if res.Header.Get("x-should-retry") == "false" { + return false + } + + return res.StatusCode == http.StatusRequestTimeout || + res.StatusCode == http.StatusConflict || + res.StatusCode == http.StatusTooManyRequests || + res.StatusCode >= http.StatusInternalServerError +} + +func parseRetryAfterHeader(resp *http.Response) (time.Duration, bool) { + if resp == nil { + return 0, false + } + + type retryData struct { + header string + units time.Duration + + // custom is used when the regular algorithm failed and is optional. + // the returned duration is used verbatim (units is not applied). 
+ custom func(string) (time.Duration, bool) + } + + nop := func(string) (time.Duration, bool) { return 0, false } + + // the headers are listed in order of preference + retries := []retryData{ + { + header: "Retry-After-Ms", + units: time.Millisecond, + custom: nop, + }, + { + header: "Retry-After", + units: time.Second, + + // retry-after values are expressed in either number of + // seconds or an HTTP-date indicating when to try again + custom: func(ra string) (time.Duration, bool) { + t, err := time.Parse(time.RFC1123, ra) + if err != nil { + return 0, false + } + return time.Until(t), true + }, + }, + } + + for _, retry := range retries { + v := resp.Header.Get(retry.header) + if v == "" { + continue + } + if retryAfter, err := strconv.ParseFloat(v, 64); err == nil { + return time.Duration(retryAfter * float64(retry.units)), true + } + if d, ok := retry.custom(v); ok { + return d, true + } + } + + return 0, false +} + +func retryDelay(res *http.Response, retryCount int) time.Duration { + // If the API asks us to wait a certain amount of time (and it's a reasonable amount), + // just do what it says. 
+ + if retryAfterDelay, ok := parseRetryAfterHeader(res); ok && 0 <= retryAfterDelay && retryAfterDelay < time.Minute { + return retryAfterDelay + } + + maxDelay := 8 * time.Second + delay := time.Duration(0.5 * float64(time.Second) * math.Pow(2, float64(retryCount))) + if delay > maxDelay { + delay = maxDelay + } + + jitter := rand.Int63n(int64(delay / 4)) + delay -= time.Duration(jitter) + return delay +} + +func (cfg *RequestConfig) Execute() (err error) { + cfg.Request.URL, err = cfg.BaseURL.Parse(strings.TrimLeft(cfg.Request.URL.String(), "/")) + if err != nil { + return err + } + + if cfg.Body != nil && cfg.Request.Body == nil { + switch body := cfg.Body.(type) { + case *bytes.Buffer: + b := body.Bytes() + cfg.Request.ContentLength = int64(body.Len()) + cfg.Request.GetBody = func() (io.ReadCloser, error) { return io.NopCloser(bytes.NewReader(b)), nil } + cfg.Request.Body, _ = cfg.Request.GetBody() + case *bytes.Reader: + cfg.Request.ContentLength = int64(body.Len()) + cfg.Request.GetBody = func() (io.ReadCloser, error) { + _, err := body.Seek(0, 0) + return io.NopCloser(body), err + } + cfg.Request.Body, _ = cfg.Request.GetBody() + default: + if rc, ok := body.(io.ReadCloser); ok { + cfg.Request.Body = rc + } else { + cfg.Request.Body = io.NopCloser(body) + } + } + } + + handler := cfg.HTTPClient.Do + for i := len(cfg.Middlewares) - 1; i >= 0; i -= 1 { + handler = applyMiddleware(cfg.Middlewares[i], handler) + } + + var res *http.Response + for retryCount := 0; retryCount <= cfg.MaxRetries; retryCount += 1 { + ctx := cfg.Request.Context() + if cfg.RequestTimeout != time.Duration(0) { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, cfg.RequestTimeout) + defer cancel() + } + + res, err = handler(cfg.Request.Clone(ctx)) + if ctx != nil && ctx.Err() != nil { + return ctx.Err() + } + if !shouldRetry(cfg.Request, res) || retryCount >= cfg.MaxRetries { + break + } + + // Prepare next request and wait for the retry delay + if 
cfg.Request.GetBody != nil { + cfg.Request.Body, err = cfg.Request.GetBody() + if err != nil { + return err + } + } + + // Can't actually refresh the body, so we don't attempt to retry here + if cfg.Request.GetBody == nil && cfg.Request.Body != nil { + break + } + + time.Sleep(retryDelay(res, retryCount)) + } + + // Save *http.Response if it is requested to, even if there was an error making the request. This is + // useful in cases where you might want to debug by inspecting the response. Note that if err != nil, + // the response should be generally be empty, but there are edge cases. + if cfg.ResponseInto != nil { + *cfg.ResponseInto = res + } + if responseBodyInto, ok := cfg.ResponseBodyInto.(**http.Response); ok { + *responseBodyInto = res + } + + // If there was a connection error in the final request or any other transport error, + // return that early without trying to coerce into an APIError. + if err != nil { + return err + } + + if res.StatusCode >= 400 { + contents, err := io.ReadAll(res.Body) + res.Body.Close() + if err != nil { + return err + } + + // If there is an APIError, re-populate the response body so that debugging + // utilities can conveniently dump the response without issue. + res.Body = io.NopCloser(bytes.NewBuffer(contents)) + + // Load the contents into the error format if it is provided. 
+ aerr := apierror.Error{Request: cfg.Request, Response: res, StatusCode: res.StatusCode} + err = aerr.UnmarshalJSON(contents) + if err != nil { + return err + } + return &aerr + } + + if cfg.ResponseBodyInto == nil { + return nil + } + if _, ok := cfg.ResponseBodyInto.(**http.Response); ok { + return nil + } + + contents, err := io.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("error reading response body: %w", err) + } + + // If we are not json, return plaintext + contentType := res.Header.Get("content-type") + isJSON := strings.Contains(contentType, "application/json") || strings.Contains(contentType, "application/vnd.api+json") + if !isJSON { + switch dst := cfg.ResponseBodyInto.(type) { + case *string: + *dst = string(contents) + case **string: + tmp := string(contents) + *dst = &tmp + case *[]byte: + *dst = contents + default: + return fmt.Errorf("expected destination type of 'string' or '[]byte' for responses with content-type that is not 'application/json'") + } + return nil + } + + // If the response happens to be a byte array, deserialize the body as-is. + switch dst := cfg.ResponseBodyInto.(type) { + case *[]byte: + *dst = contents + } + + err = json.NewDecoder(bytes.NewReader(contents)).Decode(cfg.ResponseBodyInto) + if err != nil { + return fmt.Errorf("error parsing response json: %w", err) + } + + return nil +} + +func ExecuteNewRequest(ctx context.Context, method string, u string, body interface{}, dst interface{}, opts ...func(*RequestConfig) error) error { + cfg, err := NewRequestConfig(ctx, method, u, body, dst, opts...) 
+ if err != nil { + return err + } + return cfg.Execute() +} + +func (cfg *RequestConfig) Clone(ctx context.Context) *RequestConfig { + if cfg == nil { + return nil + } + req := cfg.Request.Clone(ctx) + var err error + if req.Body != nil { + req.Body, err = req.GetBody() + } + if err != nil { + return nil + } + new := &RequestConfig{ + MaxRetries: cfg.MaxRetries, + Context: ctx, + Request: req, + HTTPClient: cfg.HTTPClient, + } + + return new +} + +func (cfg *RequestConfig) Apply(opts ...func(*RequestConfig) error) error { + for _, opt := range opts { + err := opt(cfg) + if err != nil { + return err + } + } + return nil +} diff --git a/internal/testutil/testutil.go b/internal/testutil/testutil.go new file mode 100644 index 0000000..826d266 --- /dev/null +++ b/internal/testutil/testutil.go @@ -0,0 +1,27 @@ +package testutil + +import ( + "net/http" + "os" + "strconv" + "testing" +) + +func CheckTestServer(t *testing.T, url string) bool { + if _, err := http.Get(url); err != nil { + const SKIP_MOCK_TESTS = "SKIP_MOCK_TESTS" + if str, ok := os.LookupEnv(SKIP_MOCK_TESTS); ok { + skip, err := strconv.ParseBool(str) + if err != nil { + t.Fatalf("strconv.ParseBool(os.LookupEnv(%s)) failed: %s", SKIP_MOCK_TESTS, err) + } + if skip { + t.Skip("The test will not run without a mock Prism server running against your OpenAPI spec") + return false + } + t.Errorf("The test will not run without a mock Prism server running against your OpenAPI spec. You can set the environment variable %s to true to skip running any tests that require the mock server", SKIP_MOCK_TESTS) + return false + } + } + return true +} diff --git a/internal/version.go b/internal/version.go new file mode 100644 index 0000000..4ff68e4 --- /dev/null +++ b/internal/version.go @@ -0,0 +1,5 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package internal + +const PackageVersion = "0.0.1-alpha.0" // x-release-please-version diff --git a/lib/.keep b/lib/.keep new file mode 100644 index 0000000..5e2c99f --- /dev/null +++ b/lib/.keep @@ -0,0 +1,4 @@ +File generated from our OpenAPI spec by Stainless. + +This directory can be used to store custom files to expand the SDK. +It is ignored by Stainless code generation and its content (other than this keep file) won't be touched. \ No newline at end of file diff --git a/model.go b/model.go new file mode 100644 index 0000000..bc9991b --- /dev/null +++ b/model.go @@ -0,0 +1,155 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/openai/openai-go/internal/apijson" + "github.com/openai/openai-go/internal/pagination" + "github.com/openai/openai-go/internal/requestconfig" + "github.com/openai/openai-go/option" +) + +// ModelService contains methods and other services that help with interacting with +// the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewModelService] method instead. +type ModelService struct { + Options []option.RequestOption +} + +// NewModelService generates a new service that applies the given options to each +// request. These options are applied after the parent client's options (if there +// is one), and before any request-specific options. +func NewModelService(opts ...option.RequestOption) (r *ModelService) { + r = &ModelService{} + r.Options = opts + return +} + +// Retrieves a model instance, providing basic information about the model such as +// the owner and permissioning. +func (r *ModelService) Get(ctx context.Context, model string, opts ...option.RequestOption) (res *Model, err error) { + opts = append(r.Options[:], opts...) 
+ if model == "" { + err = errors.New("missing required model parameter") + return + } + path := fmt.Sprintf("models/%s", model) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +// Lists the currently available models, and provides basic information about each +// one such as the owner and availability. +func (r *ModelService) List(ctx context.Context, opts ...option.RequestOption) (res *pagination.Page[Model], err error) { + var raw *http.Response + opts = append(r.Options[:], opts...) + opts = append([]option.RequestOption{option.WithResponseInto(&raw)}, opts...) + path := "models" + cfg, err := requestconfig.NewRequestConfig(ctx, http.MethodGet, path, nil, &res, opts...) + if err != nil { + return nil, err + } + err = cfg.Execute() + if err != nil { + return nil, err + } + res.SetPageConfig(cfg, raw) + return res, nil +} + +// Lists the currently available models, and provides basic information about each +// one such as the owner and availability. +func (r *ModelService) ListAutoPaging(ctx context.Context, opts ...option.RequestOption) *pagination.PageAutoPager[Model] { + return pagination.NewPageAutoPager(r.List(ctx, opts...)) +} + +// Delete a fine-tuned model. You must have the Owner role in your organization to +// delete a model. +func (r *ModelService) Delete(ctx context.Context, model string, opts ...option.RequestOption) (res *ModelDeleted, err error) { + opts = append(r.Options[:], opts...) + if model == "" { + err = errors.New("missing required model parameter") + return + } + path := fmt.Sprintf("models/%s", model) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodDelete, path, nil, &res, opts...) + return +} + +// Describes an OpenAI model offering that can be used with the API. +type Model struct { + // The model identifier, which can be referenced in the API endpoints. + ID string `json:"id,required"` + // The Unix timestamp (in seconds) when the model was created. 
+ Created int64 `json:"created,required"` + // The object type, which is always "model". + Object ModelObject `json:"object,required"` + // The organization that owns the model. + OwnedBy string `json:"owned_by,required"` + JSON modelJSON `json:"-"` +} + +// modelJSON contains the JSON metadata for the struct [Model] +type modelJSON struct { + ID apijson.Field + Created apijson.Field + Object apijson.Field + OwnedBy apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *Model) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r modelJSON) RawJSON() string { + return r.raw +} + +// The object type, which is always "model". +type ModelObject string + +const ( + ModelObjectModel ModelObject = "model" +) + +func (r ModelObject) IsKnown() bool { + switch r { + case ModelObjectModel: + return true + } + return false +} + +type ModelDeleted struct { + ID string `json:"id,required"` + Deleted bool `json:"deleted,required"` + Object string `json:"object,required"` + JSON modelDeletedJSON `json:"-"` +} + +// modelDeletedJSON contains the JSON metadata for the struct [ModelDeleted] +type modelDeletedJSON struct { + ID apijson.Field + Deleted apijson.Field + Object apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ModelDeleted) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r modelDeletedJSON) RawJSON() string { + return r.raw +} diff --git a/model_test.go b/model_test.go new file mode 100644 index 0000000..205b5d0 --- /dev/null +++ b/model_test.go @@ -0,0 +1,80 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package openai_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" +) + +func TestModelGet(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Models.Get(context.TODO(), "gpt-3.5-turbo") + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestModelList(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Models.List(context.TODO()) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestModelDelete(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Models.Delete(context.TODO(), "ft:gpt-3.5-turbo:acemeco:suffix:abc123") + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/moderation.go b/moderation.go new file mode 100644 index 0000000..3870493 --- /dev/null +++ 
b/moderation.go @@ -0,0 +1,259 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai + +import ( + "context" + "net/http" + + "github.com/openai/openai-go/internal/apijson" + "github.com/openai/openai-go/internal/param" + "github.com/openai/openai-go/internal/requestconfig" + "github.com/openai/openai-go/option" +) + +// ModerationService contains methods and other services that help with interacting +// with the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewModerationService] method instead. +type ModerationService struct { + Options []option.RequestOption +} + +// NewModerationService generates a new service that applies the given options to +// each request. These options are applied after the parent client's options (if +// there is one), and before any request-specific options. +func NewModerationService(opts ...option.RequestOption) (r *ModerationService) { + r = &ModerationService{} + r.Options = opts + return +} + +// Classifies if text is potentially harmful. +func (r *ModerationService) New(ctx context.Context, body ModerationNewParams, opts ...option.RequestOption) (res *ModerationNewResponse, err error) { + opts = append(r.Options[:], opts...) + path := "moderations" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +type Moderation struct { + // A list of the categories, and whether they are flagged or not. + Categories ModerationCategories `json:"categories,required"` + // A list of the categories along with their scores as predicted by model. + CategoryScores ModerationCategoryScores `json:"category_scores,required"` + // Whether any of the below categories are flagged. 
+ Flagged bool `json:"flagged,required"` + JSON moderationJSON `json:"-"` +} + +// moderationJSON contains the JSON metadata for the struct [Moderation] +type moderationJSON struct { + Categories apijson.Field + CategoryScores apijson.Field + Flagged apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *Moderation) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r moderationJSON) RawJSON() string { + return r.raw +} + +// A list of the categories, and whether they are flagged or not. +type ModerationCategories struct { + // Content that expresses, incites, or promotes harassing language towards any + // target. + Harassment bool `json:"harassment,required"` + // Harassment content that also includes violence or serious harm towards any + // target. + HarassmentThreatening bool `json:"harassment/threatening,required"` + // Content that expresses, incites, or promotes hate based on race, gender, + // ethnicity, religion, nationality, sexual orientation, disability status, or + // caste. Hateful content aimed at non-protected groups (e.g., chess players) is + // harassment. + Hate bool `json:"hate,required"` + // Hateful content that also includes violence or serious harm towards the targeted + // group based on race, gender, ethnicity, religion, nationality, sexual + // orientation, disability status, or caste. + HateThreatening bool `json:"hate/threatening,required"` + // Content that promotes, encourages, or depicts acts of self-harm, such as + // suicide, cutting, and eating disorders. + SelfHarm bool `json:"self-harm,required"` + // Content that encourages performing acts of self-harm, such as suicide, cutting, + // and eating disorders, or that gives instructions or advice on how to commit such + // acts. 
+ SelfHarmInstructions bool `json:"self-harm/instructions,required"` + // Content where the speaker expresses that they are engaging or intend to engage + // in acts of self-harm, such as suicide, cutting, and eating disorders. + SelfHarmIntent bool `json:"self-harm/intent,required"` + // Content meant to arouse sexual excitement, such as the description of sexual + // activity, or that promotes sexual services (excluding sex education and + // wellness). + Sexual bool `json:"sexual,required"` + // Sexual content that includes an individual who is under 18 years old. + SexualMinors bool `json:"sexual/minors,required"` + // Content that depicts death, violence, or physical injury. + Violence bool `json:"violence,required"` + // Content that depicts death, violence, or physical injury in graphic detail. + ViolenceGraphic bool `json:"violence/graphic,required"` + JSON moderationCategoriesJSON `json:"-"` +} + +// moderationCategoriesJSON contains the JSON metadata for the struct +// [ModerationCategories] +type moderationCategoriesJSON struct { + Harassment apijson.Field + HarassmentThreatening apijson.Field + Hate apijson.Field + HateThreatening apijson.Field + SelfHarm apijson.Field + SelfHarmInstructions apijson.Field + SelfHarmIntent apijson.Field + Sexual apijson.Field + SexualMinors apijson.Field + Violence apijson.Field + ViolenceGraphic apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ModerationCategories) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r moderationCategoriesJSON) RawJSON() string { + return r.raw +} + +// A list of the categories along with their scores as predicted by model. +type ModerationCategoryScores struct { + // The score for the category 'harassment'. + Harassment float64 `json:"harassment,required"` + // The score for the category 'harassment/threatening'. 
+ HarassmentThreatening float64 `json:"harassment/threatening,required"` + // The score for the category 'hate'. + Hate float64 `json:"hate,required"` + // The score for the category 'hate/threatening'. + HateThreatening float64 `json:"hate/threatening,required"` + // The score for the category 'self-harm'. + SelfHarm float64 `json:"self-harm,required"` + // The score for the category 'self-harm/instructions'. + SelfHarmInstructions float64 `json:"self-harm/instructions,required"` + // The score for the category 'self-harm/intent'. + SelfHarmIntent float64 `json:"self-harm/intent,required"` + // The score for the category 'sexual'. + Sexual float64 `json:"sexual,required"` + // The score for the category 'sexual/minors'. + SexualMinors float64 `json:"sexual/minors,required"` + // The score for the category 'violence'. + Violence float64 `json:"violence,required"` + // The score for the category 'violence/graphic'. + ViolenceGraphic float64 `json:"violence/graphic,required"` + JSON moderationCategoryScoresJSON `json:"-"` +} + +// moderationCategoryScoresJSON contains the JSON metadata for the struct +// [ModerationCategoryScores] +type moderationCategoryScoresJSON struct { + Harassment apijson.Field + HarassmentThreatening apijson.Field + Hate apijson.Field + HateThreatening apijson.Field + SelfHarm apijson.Field + SelfHarmInstructions apijson.Field + SelfHarmIntent apijson.Field + Sexual apijson.Field + SexualMinors apijson.Field + Violence apijson.Field + ViolenceGraphic apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ModerationCategoryScores) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r moderationCategoryScoresJSON) RawJSON() string { + return r.raw +} + +// Represents if a given text input is potentially harmful. +type ModerationNewResponse struct { + // The unique identifier for the moderation request. 
+ ID string `json:"id,required"` + // The model used to generate the moderation results. + Model string `json:"model,required"` + // A list of moderation objects. + Results []Moderation `json:"results,required"` + JSON moderationNewResponseJSON `json:"-"` +} + +// moderationNewResponseJSON contains the JSON metadata for the struct +// [ModerationNewResponse] +type moderationNewResponseJSON struct { + ID apijson.Field + Model apijson.Field + Results apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ModerationNewResponse) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r moderationNewResponseJSON) RawJSON() string { + return r.raw +} + +type ModerationNewParams struct { + // The input text to classify + Input param.Field[ModerationNewParamsInputUnion] `json:"input,required"` + // Two content moderations models are available: `text-moderation-stable` and + // `text-moderation-latest`. + // + // The default is `text-moderation-latest` which will be automatically upgraded + // over time. This ensures you are always using our most accurate model. If you use + // `text-moderation-stable`, we will provide advanced notice before updating the + // model. Accuracy of `text-moderation-stable` may be slightly lower than for + // `text-moderation-latest`. + Model param.Field[ModerationNewParamsModel] `json:"model"` +} + +func (r ModerationNewParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// The input text to classify +// +// Satisfied by [shared.UnionString], [ModerationNewParamsInputArray]. 
+type ModerationNewParamsInputUnion interface { + ImplementsModerationNewParamsInputUnion() +} + +type ModerationNewParamsInputArray []string + +func (r ModerationNewParamsInputArray) ImplementsModerationNewParamsInputUnion() {} + +type ModerationNewParamsModel string + +const ( + ModerationNewParamsModelTextModerationLatest ModerationNewParamsModel = "text-moderation-latest" + ModerationNewParamsModelTextModerationStable ModerationNewParamsModel = "text-moderation-stable" +) + +func (r ModerationNewParamsModel) IsKnown() bool { + switch r { + case ModerationNewParamsModelTextModerationLatest, ModerationNewParamsModelTextModerationStable: + return true + } + return false +} diff --git a/moderation_test.go b/moderation_test.go new file mode 100644 index 0000000..80000e2 --- /dev/null +++ b/moderation_test.go @@ -0,0 +1,40 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" + "github.com/openai/openai-go/shared" +) + +func TestModerationNewWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Moderations.New(context.TODO(), openai.ModerationNewParams{ + Input: openai.F[openai.ModerationNewParamsInputUnion](shared.UnionString("I want to kill them.")), + Model: openai.F(openai.ModerationNewParamsModelTextModerationLatest), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/option/requestoption.go 
b/option/requestoption.go
new file mode 100644
index 0000000..a61e6a4
--- /dev/null
+++ b/option/requestoption.go
@@ -0,0 +1,253 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+package option
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/openai/openai-go/internal/requestconfig"
+	"github.com/tidwall/sjson"
+)
+
+// RequestOption is an option for the requests made by the openai API Client
+// which can be supplied to clients, services, and methods. You can read more about this functional
+// options pattern in our [README].
+//
+// [README]: https://pkg.go.dev/github.com/openai/openai-go#readme-requestoptions
+type RequestOption = func(*requestconfig.RequestConfig) error
+
+// WithBaseURL returns a RequestOption that sets the BaseURL for the client.
+// An invalid URL is reported when the option is applied, not via log.Fatalf.
+func WithBaseURL(base string) RequestOption {
+	u, err := url.Parse(base)
+	return func(r *requestconfig.RequestConfig) error {
+		if err != nil {
+			return fmt.Errorf("requestoption: WithBaseURL failed to parse url: %w", err)
+		}
+		r.BaseURL = u
+		return nil
+	}
+}
+
+// WithHTTPClient returns a RequestOption that changes the underlying [http.Client] used to make this
+// request, which by default is [http.DefaultClient].
+func WithHTTPClient(client *http.Client) RequestOption {
+	return func(r *requestconfig.RequestConfig) error {
+		r.HTTPClient = client
+		return nil
+	}
+}
+
+// MiddlewareNext is a function which is called by a middleware to pass an HTTP request
+// to the next stage in the middleware chain.
+type MiddlewareNext = func(*http.Request) (*http.Response, error)
+
+// Middleware is a function which intercepts HTTP requests, processing or modifying
+// them, and then passing the request to the next middleware or handler
+// in the chain by calling the provided MiddlewareNext function.
+type Middleware = func(*http.Request, MiddlewareNext) (*http.Response, error) + +// WithMiddleware returns a RequestOption that applies the given middleware +// to the requests made. Each middleware will execute in the order they were given. +func WithMiddleware(middlewares ...Middleware) RequestOption { + return func(r *requestconfig.RequestConfig) error { + r.Middlewares = append(r.Middlewares, middlewares...) + return nil + } +} + +// WithMaxRetries returns a RequestOption that sets the maximum number of retries that the client +// attempts to make. When given 0, the client only makes one request. By +// default, the client retries two times. +// +// WithMaxRetries panics when retries is negative. +func WithMaxRetries(retries int) RequestOption { + if retries < 0 { + panic("option: cannot have fewer than 0 retries") + } + return func(r *requestconfig.RequestConfig) error { + r.MaxRetries = retries + return nil + } +} + +// WithHeader returns a RequestOption that sets the header value to the associated key. It overwrites +// any value if there was one already present. +func WithHeader(key, value string) RequestOption { + return func(r *requestconfig.RequestConfig) error { + r.Request.Header.Set(key, value) + return nil + } +} + +// WithHeaderAdd returns a RequestOption that adds the header value to the associated key. It appends +// onto any existing values. +func WithHeaderAdd(key, value string) RequestOption { + return func(r *requestconfig.RequestConfig) error { + r.Request.Header.Add(key, value) + return nil + } +} + +// WithHeaderDel returns a RequestOption that deletes the header value(s) associated with the given key. +func WithHeaderDel(key string) RequestOption { + return func(r *requestconfig.RequestConfig) error { + r.Request.Header.Del(key) + return nil + } +} + +// WithQuery returns a RequestOption that sets the query value to the associated key. It overwrites +// any value if there was one already present. 
+func WithQuery(key, value string) RequestOption { + return func(r *requestconfig.RequestConfig) error { + query := r.Request.URL.Query() + query.Set(key, value) + r.Request.URL.RawQuery = query.Encode() + return nil + } +} + +// WithQueryAdd returns a RequestOption that adds the query value to the associated key. It appends +// onto any existing values. +func WithQueryAdd(key, value string) RequestOption { + return func(r *requestconfig.RequestConfig) error { + query := r.Request.URL.Query() + query.Add(key, value) + r.Request.URL.RawQuery = query.Encode() + return nil + } +} + +// WithQueryDel returns a RequestOption that deletes the query value(s) associated with the key. +func WithQueryDel(key string) RequestOption { + return func(r *requestconfig.RequestConfig) error { + query := r.Request.URL.Query() + query.Del(key) + r.Request.URL.RawQuery = query.Encode() + return nil + } +} + +// WithJSONSet returns a RequestOption that sets the body's JSON value associated with the key. +// The key accepts a string as defined by the [sjson format]. +// +// [sjson format]: https://github.com/tidwall/sjson +func WithJSONSet(key string, value interface{}) RequestOption { + return func(r *requestconfig.RequestConfig) (err error) { + if buffer, ok := r.Body.(*bytes.Buffer); ok { + b := buffer.Bytes() + b, err = sjson.SetBytes(b, key, value) + if err != nil { + return err + } + r.Body = bytes.NewBuffer(b) + return nil + } + + return fmt.Errorf("cannot use WithJSONSet on a body that is not serialized as *bytes.Buffer") + } +} + +// WithJSONDel returns a RequestOption that deletes the body's JSON value associated with the key. +// The key accepts a string as defined by the [sjson format]. 
+// +// [sjson format]: https://github.com/tidwall/sjson +func WithJSONDel(key string) RequestOption { + return func(r *requestconfig.RequestConfig) (err error) { + if buffer, ok := r.Body.(*bytes.Buffer); ok { + b := buffer.Bytes() + b, err = sjson.DeleteBytes(b, key) + if err != nil { + return err + } + r.Body = bytes.NewBuffer(b) + return nil + } + + return fmt.Errorf("cannot use WithJSONDel on a body that is not serialized as *bytes.Buffer") + } +} + +// WithResponseBodyInto returns a RequestOption that overwrites the deserialization target with +// the given destination. If provided, we don't deserialize into the default struct. +func WithResponseBodyInto(dst any) RequestOption { + return func(r *requestconfig.RequestConfig) error { + r.ResponseBodyInto = dst + return nil + } +} + +// WithResponseInto returns a RequestOption that copies the [*http.Response] into the given address. +func WithResponseInto(dst **http.Response) RequestOption { + return func(r *requestconfig.RequestConfig) error { + r.ResponseInto = dst + return nil + } +} + +// WithRequestBody returns a RequestOption that provides a custom serialized body with the given +// content type. +// +// body accepts an io.Reader or raw []bytes. +func WithRequestBody(contentType string, body any) RequestOption { + return func(r *requestconfig.RequestConfig) error { + if reader, ok := body.(io.Reader); ok { + r.Body = reader + return r.Apply(WithHeader("Content-Type", contentType)) + } + + if b, ok := body.([]byte); ok { + r.Body = bytes.NewBuffer(b) + return r.Apply(WithHeader("Content-Type", contentType)) + } + + return fmt.Errorf("body must be a byte slice or implement io.Reader") + } +} + +// WithRequestTimeout returns a RequestOption that sets the timeout for +// each request attempt. This should be smaller than the timeout defined in +// the context, which spans all retries. 
+func WithRequestTimeout(dur time.Duration) RequestOption {
+	return func(r *requestconfig.RequestConfig) error {
+		r.RequestTimeout = dur
+		return nil
+	}
+}
+
+// WithEnvironmentProduction returns a RequestOption that sets the current
+// environment to be the "production" environment. An environment specifies which base URL
+// to use by default.
+func WithEnvironmentProduction() RequestOption {
+	return WithBaseURL("https://api.openai.com/v1/")
+}
+
+// WithAPIKey returns a RequestOption that sets the client setting "api_key".
+func WithAPIKey(value string) RequestOption {
+	return func(r *requestconfig.RequestConfig) error {
+		r.APIKey = value
+		return r.Apply(WithHeader("authorization", fmt.Sprintf("Bearer %s", r.APIKey)))
+	}
+}
+
+// WithOrganization returns a RequestOption that sets the client setting "organization".
+func WithOrganization(value string) RequestOption {
+	return func(r *requestconfig.RequestConfig) error {
+		r.Organization = value
+		return r.Apply(WithHeader("OpenAI-Organization", value))
+	}
+}
+
+// WithProject returns a RequestOption that sets the client setting "project".
+func WithProject(value string) RequestOption {
+	return func(r *requestconfig.RequestConfig) error {
+		r.Project = value
+		return r.Apply(WithHeader("OpenAI-Project", value))
+	}
+}
diff --git a/packages/ssestream/streaming.go b/packages/ssestream/streaming.go
new file mode 100644
index 0000000..d9b4333
--- /dev/null
+++ b/packages/ssestream/streaming.go
@@ -0,0 +1,187 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+package ssestream
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"io"
+	"net/http"
+	"strings"
+)
+
+// Decoder reads a stream of events from a response body. Next advances to the
+// next event, Event returns it, and Err reports any error that ended iteration.
+type Decoder interface {
+	Event() Event
+	Next() bool
+	Close() error
+	Err() error
+}
+
+// NewDecoder returns a Decoder for the response body, chosen by the response's
+// content type. Unregistered content types fall back to text/event-stream
+// framing. A nil response or body yields a nil Decoder.
+func NewDecoder(res *http.Response) Decoder {
+	if res == nil || res.Body == nil {
+		return nil
+	}
+
+	var decoder Decoder
+	// Normalize the content type: RegisterDecoder stores its keys lowercased.
+	contentType := strings.ToLower(res.Header.Get("content-type"))
+	if t, ok := decoderTypes[contentType]; ok {
+		decoder = t(res.Body)
+	} else {
+		scanner := bufio.NewScanner(res.Body)
+		decoder = &eventStreamDecoder{rc: res.Body, scn: scanner}
+	}
+	return decoder
+}
+
+var decoderTypes = map[string](func(io.ReadCloser) Decoder){}
+
+// RegisterDecoder associates a Decoder constructor with a content type,
+// matched case-insensitively by NewDecoder.
+func RegisterDecoder(contentType string, decoder func(io.ReadCloser) Decoder) {
+	decoderTypes[strings.ToLower(contentType)] = decoder
+}
+
+// Event is a single server-sent event: its "event:" name and raw "data:" payload.
+type Event struct {
+	Type string
+	Data []byte
+}
+
+// A base implementation of a Decoder for text/event-stream.
+type eventStreamDecoder struct {
+	evt Event
+	rc  io.ReadCloser
+	scn *bufio.Scanner
+	err error
+}
+
+// Next scans until a blank line dispatches the accumulated event, per the SSE
+// wire format. It reports false at end of input or on a read error.
+func (s *eventStreamDecoder) Next() bool {
+	if s.err != nil {
+		return false
+	}
+
+	event := ""
+	data := bytes.NewBuffer(nil)
+
+	for s.scn.Scan() {
+		txt := s.scn.Bytes()
+
+		// Dispatch event on an empty line
+		if len(txt) == 0 {
+			s.evt = Event{
+				Type: event,
+				Data: data.Bytes(),
+			}
+			return true
+		}
+
+		// Split a string like "event: bar" into name="event" and value=" bar".
+		name, value, _ := bytes.Cut(txt, []byte(":"))
+
+		// Consume an optional space after the colon if it exists.
+		if len(value) > 0 && value[0] == ' ' {
+			value = value[1:]
+		}
+
+		switch string(name) {
+		case "":
+			// A line in the form ": something" is a comment and should be ignored.
+			continue
+		case "event":
+			event = string(value)
+		case "data":
+			// bytes.Buffer writes never fail, so no error handling is needed.
+			// (Checking and "break"-ing here would only exit the switch anyway.)
+			data.Write(value)
+			data.WriteByte('\n')
+		}
+	}
+
+	// Input ended without a dispatching blank line; surface any scan error.
+	s.err = s.scn.Err()
+	return false
+}
+
+func (s *eventStreamDecoder) Event() Event {
+	return s.evt
+}
+
+func (s *eventStreamDecoder) Close() error {
+	return s.rc.Close()
+}
+
+func (s *eventStreamDecoder) Err() error {
+	return s.err
+}
+
+// Stream decodes a typed event stream, unmarshalling each event's data into T.
+// Iteration stops at a "[DONE]" sentinel, end of input, or the first error.
+type Stream[T any] struct {
+	decoder Decoder
+	cur     T
+	err     error
+	done    bool
+}
+
+// NewStream wraps a Decoder (and any error from constructing the request) in a
+// typed Stream. Both arguments are typically the results of a streaming call.
+func NewStream[T any](decoder Decoder, err error) *Stream[T] {
+	return &Stream[T]{
+		decoder: decoder,
+		err:     err,
+	}
+}
+
+// Next advances to the next decodable event, reporting whether Current is valid.
+func (s *Stream[T]) Next() bool {
+	// A nil decoder (e.g. built from a nil response) behaves as an empty stream.
+	if s.err != nil || s.done || s.decoder == nil {
+		return false
+	}
+
+	for s.decoder.Next() {
+		if bytes.HasPrefix(s.decoder.Event().Data, []byte("[DONE]")) {
+			s.done = true
+			return false
+		}
+
+		if s.decoder.Event().Type == "" {
+			s.err = json.Unmarshal(s.decoder.Event().Data, &s.cur)
+			if s.err != nil {
+				return false
+			}
+			return true
+		}
+	}
+
+	// Propagate any error the decoder hit while reading the stream.
+	s.err = s.decoder.Err()
+	return false
+}
+
+// Current returns the most recently decoded event payload.
+func (s *Stream[T]) Current() T {
+	return s.cur
+}
+
+// Err returns the first error encountered while streaming, if any.
+func (s *Stream[T]) Err() error {
+	return s.err
+}
+
+// Close releases the underlying response body. It is safe on a nil-decoder stream.
+func (s *Stream[T]) Close() error {
+	if s.decoder == nil {
+		return nil
+	}
+	return s.decoder.Close()
+}
diff --git a/paginationauto_test.go b/paginationauto_test.go
new file mode 100644
index 0000000..eb78488
--- /dev/null
+++ b/paginationauto_test.go
@@ -0,0 +1,38 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+ +package openai_test + +import ( + "context" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" +) + +func TestAutoPagination(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + iter := client.FineTuning.Jobs.ListAutoPaging(context.TODO(), openai.FineTuningJobListParams{ + Limit: openai.F(int64(20)), + }) + // Prism mock isn't going to give us real pagination + for i := 0; i < 3 && iter.Next(); i++ { + job := iter.Current() + t.Logf("%+v\n", job.ID) + } + if err := iter.Err(); err != nil { + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/paginationmanual_test.go b/paginationmanual_test.go new file mode 100644 index 0000000..04492e8 --- /dev/null +++ b/paginationmanual_test.go @@ -0,0 +1,46 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package openai_test + +import ( + "context" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" +) + +func TestManualPagination(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + page, err := client.FineTuning.Jobs.List(context.TODO(), openai.FineTuningJobListParams{ + Limit: openai.F(int64(20)), + }) + if err != nil { + t.Fatalf("err should be nil: %s", err.Error()) + } + for _, job := range page.Data { + t.Logf("%+v\n", job.ID) + } + // Prism mock isn't going to give us real pagination + page, err = page.GetNextPage() + if err != nil { + t.Fatalf("err should be nil: %s", err.Error()) + } + if page != nil { + for _, job := range page.Data { + t.Logf("%+v\n", job.ID) + } + } +} diff --git a/release-please-config.json b/release-please-config.json new file mode 100644 index 0000000..a38198e --- /dev/null +++ b/release-please-config.json @@ -0,0 +1,67 @@ +{ + "packages": { + ".": {} + }, + "$schema": "https://raw.githubusercontent.com/stainless-api/release-please/main/schemas/config.json", + "include-v-in-tag": true, + "include-component-in-tag": false, + "versioning": "prerelease", + "prerelease": true, + "bump-minor-pre-major": true, + "bump-patch-for-minor-pre-major": false, + "pull-request-header": "Automated Release PR", + "pull-request-title-pattern": "release: ${version}", + "changelog-sections": [ + { + "type": "feat", + "section": "Features" + }, + { + "type": "fix", + "section": "Bug Fixes" + }, + { + "type": "perf", + "section": "Performance Improvements" + }, + { + "type": "revert", + "section": "Reverts" + }, + { + "type": "chore", + "section": "Chores" + }, + { + "type": "docs", + "section": "Documentation" + }, + { + 
"type": "style", + "section": "Styles" + }, + { + "type": "refactor", + "section": "Refactors" + }, + { + "type": "test", + "section": "Tests", + "hidden": true + }, + { + "type": "build", + "section": "Build System" + }, + { + "type": "ci", + "section": "Continuous Integration", + "hidden": true + } + ], + "release-type": "go", + "extra-files": [ + "internal/version.go", + "README.md" + ] +} \ No newline at end of file diff --git a/scripts/bootstrap b/scripts/bootstrap new file mode 100755 index 0000000..ed03e52 --- /dev/null +++ b/scripts/bootstrap @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then + brew bundle check >/dev/null 2>&1 || { + echo "==> Installing Homebrew dependencies…" + brew bundle + } +fi + +echo "==> Installing Go dependencies…" + +go mod tidy diff --git a/scripts/format b/scripts/format new file mode 100755 index 0000000..db2a3fa --- /dev/null +++ b/scripts/format @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +echo "==> Running gofmt -s -w" +gofmt -s -w . diff --git a/scripts/lint b/scripts/lint new file mode 100755 index 0000000..fa7ba1f --- /dev/null +++ b/scripts/lint @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +echo "==> Running Go build" +go build ./... diff --git a/scripts/mock b/scripts/mock new file mode 100755 index 0000000..f586157 --- /dev/null +++ b/scripts/mock @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." 
+
+if [[ -n "$1" && "$1" != '--'* ]]; then
+  URL="$1"
+  shift
+else
+  URL="$(grep 'openapi_spec_url' .stats.yml | cut -d' ' -f2)"
+fi
+
+# Check if the URL is empty
+if [ -z "$URL" ]; then
+  echo "Error: No OpenAPI spec path/url provided or found in .stats.yml"
+  exit 1
+fi
+
+echo "==> Starting mock server with URL ${URL}"
+
+# Run prism mock on the given spec
+if [ "$1" == "--daemon" ]; then
+  npm exec --package=@stainless-api/prism-cli@5.8.4 -- prism mock "$URL" &> .prism.log &
+
+  # Wait for server to come online
+  echo -n "Waiting for server"
+  while ! grep -q "✖ fatal\|Prism is listening" ".prism.log" ; do
+    echo -n "."
+    sleep 0.1
+  done
+
+  if grep -q "✖ fatal" ".prism.log"; then
+    cat .prism.log
+    exit 1
+  fi
+
+  echo
+else
+  npm exec --package=@stainless-api/prism-cli@5.8.4 -- prism mock "$URL"
+fi
diff --git a/scripts/test b/scripts/test
new file mode 100755
index 0000000..efebcea
--- /dev/null
+++ b/scripts/test
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+
+set -e
+
+cd "$(dirname "$0")/.."
+
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[0;33m'
+NC='\033[0m' # No Color
+
+function prism_is_running() {
+  curl --silent "http://localhost:4010" >/dev/null 2>&1
+}
+
+kill_server_on_port() {
+  pids=$(lsof -t -i tcp:"$1" || echo "")
+  if [ "$pids" != "" ]; then
+    # lsof -t prints one PID per line; quoting $pids would pass them to kill
+    # as a single malformed argument, so split them via xargs instead.
+    echo "$pids" | xargs kill
+    echo "Stopped $pids."
+  fi
+}
+
+function is_overriding_api_base_url() {
+  [ -n "$TEST_API_BASE_URL" ]
+}
+
+if ! is_overriding_api_base_url && ! prism_is_running ; then
+  # When we exit this script, make sure to kill the background mock server process
+  trap 'kill_server_on_port 4010' EXIT
+
+  # Start the dev server
+  ./scripts/mock --daemon
+fi
+
+if is_overriding_api_base_url ; then
+  echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}"
+  echo
+elif ! prism_is_running ; then
+  echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server"
+  echo -e "running against your OpenAPI spec."
+ echo + echo -e "To run the server, pass in the path or url of your OpenAPI" + echo -e "spec to the prism command:" + echo + echo -e " \$ ${YELLOW}npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock path/to/your.openapi.yml${NC}" + echo + + exit 1 +else + echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}" + echo +fi + +echo "==> Running tests" +go test ./... "$@" diff --git a/shared/shared.go b/shared/shared.go new file mode 100644 index 0000000..4c0f3ca --- /dev/null +++ b/shared/shared.go @@ -0,0 +1,93 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package shared + +import ( + "github.com/openai/openai-go/internal/apijson" + "github.com/openai/openai-go/internal/param" +) + +type ErrorObject struct { + Code string `json:"code,required,nullable"` + Message string `json:"message,required"` + Param string `json:"param,required,nullable"` + Type string `json:"type,required"` + JSON errorObjectJSON `json:"-"` +} + +// errorObjectJSON contains the JSON metadata for the struct [ErrorObject] +type errorObjectJSON struct { + Code apijson.Field + Message apijson.Field + Param apijson.Field + Type apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ErrorObject) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r errorObjectJSON) RawJSON() string { + return r.raw +} + +type FunctionDefinition struct { + // The name of the function to be called. Must be a-z, A-Z, 0-9, or contain + // underscores and dashes, with a maximum length of 64. + Name string `json:"name,required"` + // A description of what the function does, used by the model to choose when and + // how to call the function. + Description string `json:"description"` + // The parameters the functions accepts, described as a JSON Schema object. 
See the + // [guide](https://platform.openai.com/docs/guides/function-calling) for examples, + // and the + // [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + // documentation about the format. + // + // Omitting `parameters` defines a function with an empty parameter list. + Parameters FunctionParameters `json:"parameters"` + JSON functionDefinitionJSON `json:"-"` +} + +// functionDefinitionJSON contains the JSON metadata for the struct +// [FunctionDefinition] +type functionDefinitionJSON struct { + Name apijson.Field + Description apijson.Field + Parameters apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *FunctionDefinition) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r functionDefinitionJSON) RawJSON() string { + return r.raw +} + +type FunctionDefinitionParam struct { + // The name of the function to be called. Must be a-z, A-Z, 0-9, or contain + // underscores and dashes, with a maximum length of 64. + Name param.Field[string] `json:"name,required"` + // A description of what the function does, used by the model to choose when and + // how to call the function. + Description param.Field[string] `json:"description"` + // The parameters the functions accepts, described as a JSON Schema object. See the + // [guide](https://platform.openai.com/docs/guides/function-calling) for examples, + // and the + // [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + // documentation about the format. + // + // Omitting `parameters` defines a function with an empty parameter list. 
+ Parameters param.Field[FunctionParameters] `json:"parameters"` +} + +func (r FunctionDefinitionParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type FunctionParameters map[string]interface{} diff --git a/shared/union.go b/shared/union.go new file mode 100644 index 0000000..cea8002 --- /dev/null +++ b/shared/union.go @@ -0,0 +1,26 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package shared + +type UnionString string + +func (UnionString) ImplementsCompletionNewParamsPromptUnion() {} +func (UnionString) ImplementsCompletionNewParamsStopUnion() {} +func (UnionString) ImplementsChatCompletionUserMessageParamContentUnion() {} +func (UnionString) ImplementsChatCompletionNewParamsStopUnion() {} +func (UnionString) ImplementsEmbeddingNewParamsInputUnion() {} +func (UnionString) ImplementsModerationNewParamsInputUnion() {} +func (UnionString) ImplementsBetaThreadNewParamsMessagesContentUnion() {} +func (UnionString) ImplementsBetaThreadNewAndRunParamsThreadMessagesContentUnion() {} +func (UnionString) ImplementsBetaThreadRunNewParamsAdditionalMessagesContentUnion() {} +func (UnionString) ImplementsBetaThreadMessageNewParamsContentUnion() {} + +type UnionInt int64 + +func (UnionInt) ImplementsFineTuningJobHyperparametersNEpochsUnion() {} +func (UnionInt) ImplementsFineTuningJobNewParamsHyperparametersBatchSizeUnion() {} +func (UnionInt) ImplementsFineTuningJobNewParamsHyperparametersNEpochsUnion() {} + +type UnionFloat float64 + +func (UnionFloat) ImplementsFineTuningJobNewParamsHyperparametersLearningRateMultiplierUnion() {} diff --git a/upload.go b/upload.go new file mode 100644 index 0000000..d39cbfd --- /dev/null +++ b/upload.go @@ -0,0 +1,234 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package openai + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/openai/openai-go/internal/apijson" + "github.com/openai/openai-go/internal/param" + "github.com/openai/openai-go/internal/requestconfig" + "github.com/openai/openai-go/option" +) + +// UploadService contains methods and other services that help with interacting +// with the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewUploadService] method instead. +type UploadService struct { + Options []option.RequestOption + Parts *UploadPartService +} + +// NewUploadService generates a new service that applies the given options to each +// request. These options are applied after the parent client's options (if there +// is one), and before any request-specific options. +func NewUploadService(opts ...option.RequestOption) (r *UploadService) { + r = &UploadService{} + r.Options = opts + r.Parts = NewUploadPartService(opts...) + return +} + +// Creates an intermediate +// [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object +// that you can add +// [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to. +// Currently, an Upload can accept at most 8 GB in total and expires after an hour +// after you create it. +// +// Once you complete the Upload, we will create a +// [File](https://platform.openai.com/docs/api-reference/files/object) object that +// contains all the parts you uploaded. This File is usable in the rest of our +// platform as a regular File object. +// +// For certain `purpose`s, the correct `mime_type` must be specified. 
Please refer +// to documentation for the supported MIME types for your use case: +// +// - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search/supported-files) +// +// For guidance on the proper filename extensions for each purpose, please follow +// the documentation on +// [creating a File](https://platform.openai.com/docs/api-reference/files/create). +func (r *UploadService) New(ctx context.Context, body UploadNewParams, opts ...option.RequestOption) (res *Upload, err error) { + opts = append(r.Options[:], opts...) + path := "uploads" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Cancels the Upload. No Parts may be added after an Upload is cancelled. +func (r *UploadService) Cancel(ctx context.Context, uploadID string, opts ...option.RequestOption) (res *Upload, err error) { + opts = append(r.Options[:], opts...) + if uploadID == "" { + err = errors.New("missing required upload_id parameter") + return + } + path := fmt.Sprintf("uploads/%s/cancel", uploadID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, nil, &res, opts...) + return +} + +// Completes the +// [Upload](https://platform.openai.com/docs/api-reference/uploads/object). +// +// Within the returned Upload object, there is a nested +// [File](https://platform.openai.com/docs/api-reference/files/object) object that +// is ready to use in the rest of the platform. +// +// You can specify the order of the Parts by passing in an ordered list of the Part +// IDs. +// +// The number of bytes uploaded upon completion must match the number of bytes +// initially specified when creating the Upload object. No Parts may be added after +// an Upload is completed. +func (r *UploadService) Complete(ctx context.Context, uploadID string, body UploadCompleteParams, opts ...option.RequestOption) (res *Upload, err error) { + opts = append(r.Options[:], opts...) 
+ if uploadID == "" { + err = errors.New("missing required upload_id parameter") + return + } + path := fmt.Sprintf("uploads/%s/complete", uploadID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// The Upload object can accept byte chunks in the form of Parts. +type Upload struct { + // The Upload unique identifier, which can be referenced in API endpoints. + ID string `json:"id,required"` + // The intended number of bytes to be uploaded. + Bytes int64 `json:"bytes,required"` + // The Unix timestamp (in seconds) for when the Upload was created. + CreatedAt int64 `json:"created_at,required"` + // The Unix timestamp (in seconds) for when the Upload was created. + ExpiresAt int64 `json:"expires_at,required"` + // The name of the file to be uploaded. + Filename string `json:"filename,required"` + // The object type, which is always "upload". + Object UploadObject `json:"object,required"` + // The intended purpose of the file. + // [Please refer here](https://platform.openai.com/docs/api-reference/files/object#files/object-purpose) + // for acceptable values. + Purpose string `json:"purpose,required"` + // The status of the Upload. + Status UploadStatus `json:"status,required"` + // The ready File object after the Upload is completed. + File FileObject `json:"file,nullable"` + JSON uploadJSON `json:"-"` +} + +// uploadJSON contains the JSON metadata for the struct [Upload] +type uploadJSON struct { + ID apijson.Field + Bytes apijson.Field + CreatedAt apijson.Field + ExpiresAt apijson.Field + Filename apijson.Field + Object apijson.Field + Purpose apijson.Field + Status apijson.Field + File apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *Upload) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r uploadJSON) RawJSON() string { + return r.raw +} + +// The object type, which is always "upload". 
+type UploadObject string + +const ( + UploadObjectUpload UploadObject = "upload" +) + +func (r UploadObject) IsKnown() bool { + switch r { + case UploadObjectUpload: + return true + } + return false +} + +// The status of the Upload. +type UploadStatus string + +const ( + UploadStatusPending UploadStatus = "pending" + UploadStatusCompleted UploadStatus = "completed" + UploadStatusCancelled UploadStatus = "cancelled" + UploadStatusExpired UploadStatus = "expired" +) + +func (r UploadStatus) IsKnown() bool { + switch r { + case UploadStatusPending, UploadStatusCompleted, UploadStatusCancelled, UploadStatusExpired: + return true + } + return false +} + +type UploadNewParams struct { + // The number of bytes in the file you are uploading. + Bytes param.Field[int64] `json:"bytes,required"` + // The name of the file to upload. + Filename param.Field[string] `json:"filename,required"` + // The MIME type of the file. + // + // This must fall within the supported MIME types for your file purpose. See the + // supported MIME types for assistants and vision. + MimeType param.Field[string] `json:"mime_type,required"` + // The intended purpose of the uploaded file. + // + // See the + // [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). + Purpose param.Field[UploadNewParamsPurpose] `json:"purpose,required"` +} + +func (r UploadNewParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +// The intended purpose of the uploaded file. +// +// See the +// [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). 
+type UploadNewParamsPurpose string + +const ( + UploadNewParamsPurposeAssistants UploadNewParamsPurpose = "assistants" + UploadNewParamsPurposeBatch UploadNewParamsPurpose = "batch" + UploadNewParamsPurposeFineTune UploadNewParamsPurpose = "fine-tune" + UploadNewParamsPurposeVision UploadNewParamsPurpose = "vision" +) + +func (r UploadNewParamsPurpose) IsKnown() bool { + switch r { + case UploadNewParamsPurposeAssistants, UploadNewParamsPurposeBatch, UploadNewParamsPurposeFineTune, UploadNewParamsPurposeVision: + return true + } + return false +} + +type UploadCompleteParams struct { + // The ordered list of Part IDs. + PartIDs param.Field[[]string] `json:"part_ids,required"` + // The optional md5 checksum for the file contents to verify if the bytes uploaded + // matches what you expect. + Md5 param.Field[string] `json:"md5"` +} + +func (r UploadCompleteParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} diff --git a/upload_test.go b/upload_test.go new file mode 100644 index 0000000..43d5d2e --- /dev/null +++ b/upload_test.go @@ -0,0 +1,92 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package openai_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" +) + +func TestUploadNew(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Uploads.New(context.TODO(), openai.UploadNewParams{ + Bytes: openai.F(int64(0)), + Filename: openai.F("filename"), + MimeType: openai.F("mime_type"), + Purpose: openai.F(openai.UploadNewParamsPurposeAssistants), + }) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestUploadCancel(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Uploads.Cancel(context.TODO(), "upload_abc123") + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestUploadCompleteWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Uploads.Complete( + context.TODO(), + "upload_abc123", + openai.UploadCompleteParams{ + PartIDs: openai.F([]string{"string", "string", "string"}), 
+ Md5: openai.F("md5"), + }, + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/uploadpart.go b/uploadpart.go new file mode 100644 index 0000000..889fcf1 --- /dev/null +++ b/uploadpart.go @@ -0,0 +1,126 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "mime/multipart" + "net/http" + + "github.com/openai/openai-go/internal/apiform" + "github.com/openai/openai-go/internal/apijson" + "github.com/openai/openai-go/internal/param" + "github.com/openai/openai-go/internal/requestconfig" + "github.com/openai/openai-go/option" +) + +// UploadPartService contains methods and other services that help with interacting +// with the openai API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewUploadPartService] method instead. +type UploadPartService struct { + Options []option.RequestOption +} + +// NewUploadPartService generates a new service that applies the given options to +// each request. These options are applied after the parent client's options (if +// there is one), and before any request-specific options. +func NewUploadPartService(opts ...option.RequestOption) (r *UploadPartService) { + r = &UploadPartService{} + r.Options = opts + return +} + +// Adds a +// [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an +// [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object. +// A Part represents a chunk of bytes from the file you are trying to upload. +// +// Each Part can be at most 64 MB, and you can add Parts until you hit the Upload +// maximum of 8 GB. +// +// It is possible to add multiple Parts in parallel. 
You can decide the intended +// order of the Parts when you +// [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete). +func (r *UploadPartService) New(ctx context.Context, uploadID string, body UploadPartNewParams, opts ...option.RequestOption) (res *UploadPart, err error) { + opts = append(r.Options[:], opts...) + if uploadID == "" { + err = errors.New("missing required upload_id parameter") + return + } + path := fmt.Sprintf("uploads/%s/parts", uploadID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// The upload Part represents a chunk of bytes we can add to an Upload object. +type UploadPart struct { + // The upload Part unique identifier, which can be referenced in API endpoints. + ID string `json:"id,required"` + // The Unix timestamp (in seconds) for when the Part was created. + CreatedAt int64 `json:"created_at,required"` + // The object type, which is always `upload.part`. + Object UploadPartObject `json:"object,required"` + // The ID of the Upload object that this Part was added to. + UploadID string `json:"upload_id,required"` + JSON uploadPartJSON `json:"-"` +} + +// uploadPartJSON contains the JSON metadata for the struct [UploadPart] +type uploadPartJSON struct { + ID apijson.Field + CreatedAt apijson.Field + Object apijson.Field + UploadID apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *UploadPart) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r uploadPartJSON) RawJSON() string { + return r.raw +} + +// The object type, which is always `upload.part`. +type UploadPartObject string + +const ( + UploadPartObjectUploadPart UploadPartObject = "upload.part" +) + +func (r UploadPartObject) IsKnown() bool { + switch r { + case UploadPartObjectUploadPart: + return true + } + return false +} + +type UploadPartNewParams struct { + // The chunk of bytes for this Part. 
+ Data param.Field[io.Reader] `json:"data,required" format:"binary"` +} + +func (r UploadPartNewParams) MarshalMultipart() (data []byte, contentType string, err error) { + buf := bytes.NewBuffer(nil) + writer := multipart.NewWriter(buf) + err = apiform.MarshalRoot(r, writer) + if err != nil { + writer.Close() + return nil, "", err + } + err = writer.Close() + if err != nil { + return nil, "", err + } + return buf.Bytes(), writer.FormDataContentType(), nil +} diff --git a/uploadpart_test.go b/uploadpart_test.go new file mode 100644 index 0000000..9da9c2a --- /dev/null +++ b/uploadpart_test.go @@ -0,0 +1,44 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package openai_test + +import ( + "bytes" + "context" + "errors" + "io" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" +) + +func TestUploadPartNew(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Uploads.Parts.New( + context.TODO(), + "upload_abc123", + openai.UploadPartNewParams{ + Data: openai.F(io.Reader(bytes.NewBuffer([]byte("some file contents")))), + }, + ) + if err != nil { + var apierr *openai.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/usage_test.go b/usage_test.go new file mode 100644 index 0000000..15512ac --- /dev/null +++ b/usage_test.go @@ -0,0 +1,38 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package openai_test + +import ( + "context" + "os" + "testing" + + "github.com/openai/openai-go" + "github.com/openai/openai-go/internal/testutil" + "github.com/openai/openai-go/option" + "github.com/openai/openai-go/shared" +) + +func TestUsage(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := openai.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Chat.Completions.New(context.TODO(), openai.ChatCompletionNewParams{ + Messages: openai.F([]openai.ChatCompletionMessageParamUnion{openai.ChatCompletionUserMessageParam{ + Role: openai.F(openai.ChatCompletionUserMessageParamRoleUser), + Content: openai.F[openai.ChatCompletionUserMessageParamContentUnion](shared.UnionString("Say this is a test")), + }}), + Model: openai.F(openai.ChatModelGPT4o), + }) + if err != nil { + t.Error(err) + } +}