Skip to content

Commit cdafc12

feat(api): gpt 5.2
Parent: 9f2b871

Some content is hidden: large commits collapse part of the diff by default, so only a subset of the 42 changed files appears below.

42 files changed: +188 additions, -104 deletions

.stats.yml

Lines changed: 2 additions & 2 deletions
@@ -1,4 +1,4 @@
 configured_endpoints: 136
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-88d85ff87ad8983262af2b729762a6e05fd509468bb691529bc2f81e4ce27c69.yml
-openapi_spec_hash: 46a55acbccd0147534017b92c1f4dd99
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-41f98da99f44ebe6204fce5c1dc9940f85f3472779e797b674c4fdc20306c77d.yml
+openapi_spec_hash: c61259027f421f501bdc6b23cf9e430e
 config_hash: 141b101c9f13b90e21af74e1686f1f41

README.md

Lines changed: 8 additions & 8 deletions
@@ -32,7 +32,7 @@ openai = OpenAI::Client.new(
 
 chat_completion = openai.chat.completions.create(
   messages: [{role: "user", content: "Say this is a test"}],
-  model: :"gpt-5.1"
+  model: :"gpt-5.2"
 )
 
 puts(chat_completion)
@@ -45,7 +45,7 @@ We provide support for streaming responses using Server-Sent Events (SSE).
 ```ruby
 stream = openai.responses.stream(
   input: "Write a haiku about OpenAI.",
-  model: :"gpt-5.1"
+  model: :"gpt-5.2"
 )
 
 stream.each do |event|
@@ -343,7 +343,7 @@ openai = OpenAI::Client.new(
 # Or, configure per-request:
 openai.chat.completions.create(
   messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}],
-  model: :"gpt-5.1",
+  model: :"gpt-5.2",
   request_options: {max_retries: 5}
 )
 ```
@@ -361,7 +361,7 @@ openai = OpenAI::Client.new(
 # Or, configure per-request:
 openai.chat.completions.create(
   messages: [{role: "user", content: "How can I list all files in a directory using Python?"}],
-  model: :"gpt-5.1",
+  model: :"gpt-5.2",
   request_options: {timeout: 5}
 )
 ```
@@ -396,7 +396,7 @@ Note: the `extra_` parameters of the same name overrides the documented paramete
 chat_completion =
   openai.chat.completions.create(
     messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}],
-    model: :"gpt-5.1",
+    model: :"gpt-5.2",
     request_options: {
       extra_query: {my_query_parameter: value},
       extra_body: {my_body_parameter: value},
@@ -444,7 +444,7 @@ You can provide typesafe request parameters like so:
 ```ruby
 openai.chat.completions.create(
   messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")],
-  model: :"gpt-5.1"
+  model: :"gpt-5.2"
 )
 ```
 
@@ -454,13 +454,13 @@ Or, equivalently:
 # Hashes work, but are not typesafe:
 openai.chat.completions.create(
   messages: [{role: "user", content: "Say this is a test"}],
-  model: :"gpt-5.1"
+  model: :"gpt-5.2"
 )
 
 # You can also splat a full Params class:
 params = OpenAI::Chat::CompletionCreateParams.new(
   messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")],
-  model: :"gpt-5.1"
+  model: :"gpt-5.2"
 )
 openai.chat.completions.create(**params)
 ```
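
The updated README snippets compose into a runnable request end to end. A minimal, self-contained sketch, assuming the `openai` gem is installed and `OPENAI_API_KEY` is set; the prompt text is illustrative:

```ruby
require "openai"

# The README's default client also reads ENV["OPENAI_API_KEY"] if api_key: is omitted.
openai = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Same call shape as the README example above, now pointing at gpt-5.2.
chat_completion = openai.chat.completions.create(
  messages: [{role: "user", content: "Say this is a test"}],
  model: :"gpt-5.2"
)
puts(chat_completion)
```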

lib/openai/internal/type/enum.rb

Lines changed: 6 additions & 6 deletions
@@ -19,23 +19,23 @@ module Type
 # @example
 #   # `chat_model` is a `OpenAI::ChatModel`
 #   case chat_model
-#   when OpenAI::ChatModel::GPT_5_1
+#   when OpenAI::ChatModel::GPT_5_2
 #     # ...
-#   when OpenAI::ChatModel::GPT_5_1_2025_11_13
+#   when OpenAI::ChatModel::GPT_5_2_2025_12_11
 #     # ...
-#   when OpenAI::ChatModel::GPT_5_1_CODEX
+#   when OpenAI::ChatModel::GPT_5_2_CHAT_LATEST
 #     # ...
 #   else
 #     puts(chat_model)
 #   end
 #
 # @example
 #   case chat_model
-#   in :"gpt-5.1"
+#   in :"gpt-5.2"
 #     # ...
-#   in :"gpt-5.1-2025-11-13"
+#   in :"gpt-5.2-2025-12-11"
 #     # ...
-#   in :"gpt-5.1-codex"
+#   in :"gpt-5.2-chat-latest"
 #     # ...
 #   else
 #     puts(chat_model)
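
The enum doc comment above is the only guidance on matching these values, so here is a concrete sketch. It assumes `chat_model` holds one of the symbols from the updated example; the branch bodies are illustrative:

```ruby
# chat_model is a plain Symbol such as :"gpt-5.2" or :"gpt-5.2-chat-latest".
case chat_model
in :"gpt-5.2" | :"gpt-5.2-2025-12-11"
  puts("flagship gpt-5.2 (alias or dated snapshot)")
in :"gpt-5.2-chat-latest"
  puts("chat-tuned latest alias")
else
  # Unknown or future model names fall through, as in the doc comment.
  puts(chat_model)
end
```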

lib/openai/models/beta/assistant_create_params.rb

Lines changed: 1 addition & 1 deletion
@@ -61,7 +61,7 @@ class AssistantCreateParams < OpenAI::Internal::Type::BaseModel
 #   - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
 #     support `none`.
 #   - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
-#   - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+#   - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
 #
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
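
Since this comment documents `reasoning_effort` on assistant creation, a hedged usage sketch follows. It assumes the beta assistants resource is exposed as `openai.beta.assistants` (mirroring the layout of the other resources in this SDK); the instructions string and effort level are illustrative:

```ruby
assistant = openai.beta.assistants.create(
  model: :"gpt-5.2",
  instructions: "You are a terse math tutor.",
  # Any documented effort level; availability varies by model as described above.
  reasoning_effort: :low
)
puts(assistant.id)
```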

lib/openai/models/beta/assistant_update_params.rb

Lines changed: 1 addition & 1 deletion
@@ -61,7 +61,7 @@ class AssistantUpdateParams < OpenAI::Internal::Type::BaseModel
 #   - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
 #     support `none`.
 #   - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
-#   - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+#   - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
 #
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

lib/openai/models/beta/threads/run_create_params.rb

Lines changed: 1 addition & 1 deletion
@@ -119,7 +119,7 @@ class RunCreateParams < OpenAI::Internal::Type::BaseModel
 #   - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
 #     support `none`.
 #   - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
-#   - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+#   - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
 #
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

lib/openai/models/chat/completion_create_params.rb

Lines changed: 1 addition & 1 deletion
@@ -214,7 +214,7 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
 #   - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
 #     support `none`.
 #   - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
-#   - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+#   - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
 #
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
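
The same note applies to chat completions, where `reasoning_effort` sits alongside `model`. A sketch under the assumption that `gpt-5.2` falls within the updated "all models after `gpt-5.1-codex-max`" rule for `xhigh`; the prompt is illustrative:

```ruby
openai.chat.completions.create(
  messages: [{role: "user", content: "Prove that 17 is prime."}],
  model: :"gpt-5.2",
  # Drop back to :high or :medium if the target model rejects :xhigh.
  reasoning_effort: :xhigh
)
```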

lib/openai/models/chat_model.rb

Lines changed: 5 additions & 0 deletions
@@ -5,6 +5,11 @@ module Models
 module ChatModel
   extend OpenAI::Internal::Type::Enum
 
+  GPT_5_2 = :"gpt-5.2"
+  GPT_5_2_2025_12_11 = :"gpt-5.2-2025-12-11"
+  GPT_5_2_CHAT_LATEST = :"gpt-5.2-chat-latest"
+  GPT_5_2_PRO = :"gpt-5.2-pro"
+  GPT_5_2_PRO_2025_12_11 = :"gpt-5.2-pro-2025-12-11"
   GPT_5_1 = :"gpt-5.1"
   GPT_5_1_2025_11_13 = :"gpt-5.1-2025-11-13"
   GPT_5_1_CODEX = :"gpt-5.1-codex"
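
The new constants are plain symbols, so they are interchangeable with the literal model names used in the README. A small sketch, assuming an `openai` client built as in the README:

```ruby
OpenAI::ChatModel::GPT_5_2 == :"gpt-5.2"  # => true

# Passing the constant instead of a raw symbol means a typo raises NameError at load time.
openai.chat.completions.create(
  messages: [{role: "user", content: "Say this is a test"}],
  model: OpenAI::ChatModel::GPT_5_2
)
```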

lib/openai/models/evals/create_eval_completions_run_data_source.rb

Lines changed: 1 addition & 1 deletion
@@ -472,7 +472,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel
 #   - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
 #     support `none`.
 #   - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
-#   - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+#   - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
 #
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

lib/openai/models/evals/run_cancel_response.rb

Lines changed: 2 additions & 2 deletions
@@ -326,7 +326,7 @@ class Responses < OpenAI::Internal::Type::BaseModel
 #   - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
 #     support `none`.
 #   - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
-#   - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+#   - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
 #
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -675,7 +675,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel
 #   - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
 #     support `none`.
 #   - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
-#   - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+#   - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
 #
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
