From bac7d5936108965a9666a65d0d4d55bd0fe78808 Mon Sep 17 00:00:00 2001
From: Winston Liu
Date: Thu, 3 Oct 2024 12:17:16 -0700
Subject: [PATCH] fix MaxCompletionTokens typo (#862)

* fix spelling error

* fix lint

* Update chat.go

* Update chat.go
---
 chat.go       | 22 +++++++++++-----------
 chat_test.go  | 38 +++++++++++++++++++-------------------
 completion.go |  2 +-
 3 files changed, 31 insertions(+), 31 deletions(-)

diff --git a/chat.go b/chat.go
index dd99c530..9adf2808 100644
--- a/chat.go
+++ b/chat.go
@@ -207,18 +207,18 @@ type ChatCompletionRequest struct {
 	// This value is now deprecated in favor of max_completion_tokens, and is not compatible with o1 series models.
 	// refs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens
 	MaxTokens int `json:"max_tokens,omitempty"`
-	// MaxCompletionsTokens An upper bound for the number of tokens that can be generated for a completion,
+	// MaxCompletionTokens An upper bound for the number of tokens that can be generated for a completion,
 	// including visible output tokens and reasoning tokens https://platform.openai.com/docs/guides/reasoning
-	MaxCompletionsTokens int                           `json:"max_completion_tokens,omitempty"`
-	Temperature          float32                       `json:"temperature,omitempty"`
-	TopP                 float32                       `json:"top_p,omitempty"`
-	N                    int                           `json:"n,omitempty"`
-	Stream               bool                          `json:"stream,omitempty"`
-	Stop                 []string                      `json:"stop,omitempty"`
-	PresencePenalty      float32                       `json:"presence_penalty,omitempty"`
-	ResponseFormat       *ChatCompletionResponseFormat `json:"response_format,omitempty"`
-	Seed                 *int                          `json:"seed,omitempty"`
-	FrequencyPenalty     float32                       `json:"frequency_penalty,omitempty"`
+	MaxCompletionTokens int                           `json:"max_completion_tokens,omitempty"`
+	Temperature         float32                       `json:"temperature,omitempty"`
+	TopP                float32                       `json:"top_p,omitempty"`
+	N                   int                           `json:"n,omitempty"`
+	Stream              bool                          `json:"stream,omitempty"`
+	Stop                []string                      `json:"stop,omitempty"`
+	PresencePenalty     float32                       `json:"presence_penalty,omitempty"`
+	ResponseFormat      *ChatCompletionResponseFormat `json:"response_format,omitempty"`
+	Seed                *int                          `json:"seed,omitempty"`
+	FrequencyPenalty    float32                       `json:"frequency_penalty,omitempty"`
 	// LogitBias is must be a token id string (specified by their token ID in the tokenizer), not a word string.
 	// incorrect: `"logit_bias":{"You": 6}`, correct: `"logit_bias":{"1639": 6}`
 	// refs: https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias
diff --git a/chat_test.go b/chat_test.go
index a54dd35e..134026cd 100644
--- a/chat_test.go
+++ b/chat_test.go
@@ -100,17 +100,17 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "log_probs_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
-				LogProbs:             true,
-				Model:                openai.O1Preview,
+				MaxCompletionTokens: 1000,
+				LogProbs:            true,
+				Model:               openai.O1Preview,
 			},
 			expectedError: openai.ErrO1BetaLimitationsLogprobs,
 		},
 		{
 			name: "message_type_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
-				Model:                openai.O1Mini,
+				MaxCompletionTokens: 1000,
+				Model:               openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
 						Role: openai.ChatMessageRoleSystem,
@@ -122,8 +122,8 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "tool_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
-				Model:                openai.O1Mini,
+				MaxCompletionTokens: 1000,
+				Model:               openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
 						Role: openai.ChatMessageRoleUser,
@@ -143,8 +143,8 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "set_temperature_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
-				Model:                openai.O1Mini,
+				MaxCompletionTokens: 1000,
+				Model:               openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
 						Role: openai.ChatMessageRoleUser,
@@ -160,8 +160,8 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "set_top_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
-				Model:                openai.O1Mini,
+				MaxCompletionTokens: 1000,
+				Model:               openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
 						Role: openai.ChatMessageRoleUser,
@@ -178,8 +178,8 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "set_n_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
-				Model:                openai.O1Mini,
+				MaxCompletionTokens: 1000,
+				Model:               openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
 						Role: openai.ChatMessageRoleUser,
@@ -197,8 +197,8 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "set_presence_penalty_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
-				Model:                openai.O1Mini,
+				MaxCompletionTokens: 1000,
+				Model:               openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
 						Role: openai.ChatMessageRoleUser,
@@ -214,8 +214,8 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "set_frequency_penalty_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
-				Model:                openai.O1Mini,
+				MaxCompletionTokens: 1000,
+				Model:               openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
 						Role: openai.ChatMessageRoleUser,
@@ -296,8 +296,8 @@ func TestO1ModelChatCompletions(t *testing.T) {
 	defer teardown()
 	server.RegisterHandler("/v1/chat/completions", handleChatCompletionEndpoint)
 	_, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
-		Model:                openai.O1Preview,
-		MaxCompletionsTokens: 1000,
+		Model:               openai.O1Preview,
+		MaxCompletionTokens: 1000,
 		Messages: []openai.ChatCompletionMessage{
 			{
 				Role: openai.ChatMessageRoleUser,
diff --git a/completion.go b/completion.go
index 8e3172ac..80c4d39a 100644
--- a/completion.go
+++ b/completion.go
@@ -7,7 +7,7 @@ import (
 )
 
 var (
-	ErrO1MaxTokensDeprecated                   = errors.New("this model is not supported MaxTokens, please use MaxCompletionsTokens") //nolint:lll
+	ErrO1MaxTokensDeprecated                   = errors.New("this model is not supported MaxTokens, please use MaxCompletionTokens") //nolint:lll
 	ErrCompletionUnsupportedModel              = errors.New("this model is not supported with this method, please use CreateChatCompletion client method instead") //nolint:lll
 	ErrCompletionStreamNotSupported            = errors.New("streaming is not supported with this method, please use CreateCompletionStream") //nolint:lll
 	ErrCompletionRequestPromptTypeNotSupported = errors.New("the type of CompletionRequest.Prompt only supports string and []string") //nolint:lll
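For reference, below is a minimal sketch of how the renamed field would be set on a request once this patch is applied. The field name (MaxCompletionTokens), model constants, and message types are taken from the diff above; the import path and client construction are assumptions based on the package name used in the tests (this appears to be the sashabaranov/go-openai module), so treat this as illustrative rather than part of the change.

// Hypothetical usage sketch, not part of the patch.
// Assumption: the library is imported as github.com/sashabaranov/go-openai.
package main

import (
	"context"
	"fmt"
	"os"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))

	resp, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
		Model: openai.O1Mini,
		// Renamed field from this patch: bounds visible output plus reasoning tokens;
		// o1-series models reject the deprecated MaxTokens field.
		MaxCompletionTokens: 1000,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Hello!"},
		},
	})
	if err != nil {
		fmt.Println("chat completion error:", err)
		return
	}
	fmt.Println(resp.Choices[0].Message.Content)
}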