diff --git a/chat.go b/chat.go
index d47c95e4..46395f35 100644
--- a/chat.go
+++ b/chat.go
@@ -207,9 +207,9 @@ type ChatCompletionRequest struct {
 	// This value is now deprecated in favor of max_completion_tokens, and is not compatible with o1 series models.
 	// refs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens
 	MaxTokens int `json:"max_tokens,omitempty"`
-	// MaxCompletionsTokens An upper bound for the number of tokens that can be generated for a completion,
+	// MaxCompletionTokens An upper bound for the number of tokens that can be generated for a completion,
 	// including visible output tokens and reasoning tokens https://platform.openai.com/docs/guides/reasoning
-	MaxCompletionsTokens int `json:"max_completions_tokens,omitempty"`
+	MaxCompletionTokens int `json:"max_completion_tokens,omitempty"`
 	Temperature float32 `json:"temperature,omitempty"`
 	TopP float32 `json:"top_p,omitempty"`
 	N int `json:"n,omitempty"`
diff --git a/chat_test.go b/chat_test.go
index a54dd35e..93ac3554 100644
--- a/chat_test.go
+++ b/chat_test.go
@@ -100,7 +100,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "log_probs_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				LogProbs: true,
 				Model: openai.O1Preview,
 			},
@@ -109,7 +109,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "message_type_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				Model: openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
@@ -122,7 +122,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "tool_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				Model: openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
@@ -143,7 +143,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "set_temperature_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				Model: openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
@@ -160,7 +160,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "set_top_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				Model: openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
@@ -178,7 +178,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "set_n_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				Model: openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
@@ -197,7 +197,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "set_presence_penalty_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				Model: openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
@@ -214,7 +214,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "set_frequency_penalty_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				Model: openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
@@ -297,7 +297,7 @@ func TestO1ModelChatCompletions(t *testing.T) {
 	server.RegisterHandler("/v1/chat/completions", handleChatCompletionEndpoint)
 	_, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
 		Model: openai.O1Preview,
-		MaxCompletionsTokens: 1000,
+		MaxCompletionTokens: 1000,
 		Messages: []openai.ChatCompletionMessage{
 			{
 				Role: openai.ChatMessageRoleUser,
diff --git a/completion.go b/completion.go
index 8e3172ac..5a154f82 100644
--- a/completion.go
+++ b/completion.go
@@ -7,7 +7,7 @@ import (
 )
 
 var (
-	ErrO1MaxTokensDeprecated = errors.New("this model is not supported MaxTokens, please use MaxCompletionsTokens") //nolint:lll
+	ErrO1MaxTokensDeprecated = errors.New("this model is not supported MaxTokens, please use MaxCompletionTokens") //nolint:lll
 	ErrCompletionUnsupportedModel = errors.New("this model is not supported with this method, please use CreateChatCompletion client method instead") //nolint:lll
 	ErrCompletionStreamNotSupported = errors.New("streaming is not supported with this method, please use CreateCompletionStream") //nolint:lll
 	ErrCompletionRequestPromptTypeNotSupported = errors.New("the type of CompletionRequest.Prompt only supports string and []string") //nolint:lll
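
Note: a minimal caller-side sketch of the renamed field, mirroring the request shape already used in the tests above. The API key placeholder, prompt text, and model choice are illustrative only and not part of the diff:

package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your-api-key") // placeholder key, illustrative only
	resp, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
		Model:               openai.O1Mini,
		MaxCompletionTokens: 1000, // renamed field; now serialized as "max_completion_tokens"
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Hello"},
		},
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(resp.Choices[0].Message.Content)
}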