diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 03276cfb..809fed12 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "4.12.0"
+ ".": "4.13.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 0d142a9e..eb957d45 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 136
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-fe8e67bdc351a518b113ab48e775750190e207807903d6b03ab22c438c38a588.yml
-openapi_spec_hash: 8af972190647ffb9dcec516e19d8761a
-config_hash: d013f4fdd4dd59c6f376a9ca482b7f9e
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-75926226b642ebb2cb415694da9dff35e8ab40145ac1b791cefb82a83809db4d.yml
+openapi_spec_hash: 6a0e391b0ba5747b6b4a3e5fe21de4da
+config_hash: adcf23ecf5f84d3cadf1d71e82ec636a
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b25f804f..ec9a42d6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,14 @@
# Changelog
+## 4.13.0 (2025-12-19)
+
+Full Changelog: [v4.12.0...v4.13.0](https://github.com/openai/openai-java/compare/v4.12.0...v4.13.0)
+
+### Features
+
+* **api:** gpt-image-1.5 ([165b588](https://github.com/openai/openai-java/commit/165b588743c301493e0dc8126cfe32c549bdc4ad))
+* **api:** slugs for new audio models; make all `model` params accept strings ([cf46781](https://github.com/openai/openai-java/commit/cf4678180c4c2d4f79356a0dd5228db0a2e4cd84))
+
## 4.12.0 (2025-12-15)
Full Changelog: [v4.11.0...v4.12.0](https://github.com/openai/openai-java/compare/v4.11.0...v4.12.0)
diff --git a/README.md b/README.md
index 91e42936..617395b3 100644
--- a/README.md
+++ b/README.md
@@ -2,8 +2,8 @@
-[](https://central.sonatype.com/artifact/com.openai/openai-java/4.12.0)
-[](https://javadoc.io/doc/com.openai/openai-java/4.12.0)
+[](https://central.sonatype.com/artifact/com.openai/openai-java/4.13.0)
+[](https://javadoc.io/doc/com.openai/openai-java/4.13.0)
@@ -11,7 +11,7 @@ The OpenAI Java SDK provides convenient access to the [OpenAI REST API](https://
-The REST API documentation can be found on [platform.openai.com](https://platform.openai.com/docs). Javadocs are available on [javadoc.io](https://javadoc.io/doc/com.openai/openai-java/4.12.0).
+The REST API documentation can be found on [platform.openai.com](https://platform.openai.com/docs). Javadocs are available on [javadoc.io](https://javadoc.io/doc/com.openai/openai-java/4.13.0).
@@ -24,7 +24,7 @@ The REST API documentation can be found on [platform.openai.com](https://platfor
### Gradle
```kotlin
-implementation("com.openai:openai-java:4.12.0")
+implementation("com.openai:openai-java:4.13.0")
```
### Maven
@@ -33,7 +33,7 @@ implementation("com.openai:openai-java:4.12.0")
<dependency>
<groupId>com.openai</groupId>
<artifactId>openai-java</artifactId>
- <version>4.12.0</version>
+ <version>4.13.0</version>
</dependency>
```
@@ -1342,7 +1342,7 @@ If you're using Spring Boot, then you can use the SDK's [Spring Boot starter](ht
#### Gradle
```kotlin
-implementation("com.openai:openai-java-spring-boot-starter:4.12.0")
+implementation("com.openai:openai-java-spring-boot-starter:4.13.0")
```
#### Maven
@@ -1351,7 +1351,7 @@ implementation("com.openai:openai-java-spring-boot-starter:4.12.0")
<dependency>
<groupId>com.openai</groupId>
<artifactId>openai-java-spring-boot-starter</artifactId>
- <version>4.12.0</version>
+ <version>4.13.0</version>
</dependency>
```
diff --git a/build.gradle.kts b/build.gradle.kts
index 0aac8875..5a5edaa3 100644
--- a/build.gradle.kts
+++ b/build.gradle.kts
@@ -8,7 +8,7 @@ repositories {
allprojects {
group = "com.openai"
- version = "4.12.0" // x-release-please-version
+ version = "4.13.0" // x-release-please-version
}
subprojects {
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/audio/AudioModel.kt b/openai-java-core/src/main/kotlin/com/openai/models/audio/AudioModel.kt
index 096cb82a..a666e028 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/audio/AudioModel.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/audio/AudioModel.kt
@@ -26,6 +26,8 @@ class AudioModel @JsonCreator private constructor(private val value: JsonField<
WHISPER_1 -> Value.WHISPER_1
GPT_4O_TRANSCRIBE -> Value.GPT_4O_TRANSCRIBE
GPT_4O_MINI_TRANSCRIBE -> Value.GPT_4O_MINI_TRANSCRIBE
+ GPT_4O_MINI_TRANSCRIBE_2025_12_15 -> Value.GPT_4O_MINI_TRANSCRIBE_2025_12_15
GPT_4O_TRANSCRIBE_DIARIZE -> Value.GPT_4O_TRANSCRIBE_DIARIZE
else -> Value._UNKNOWN
}
@@ -86,6 +91,7 @@ class AudioModel @JsonCreator private constructor(private val value: JsonField<
WHISPER_1 -> Known.WHISPER_1
GPT_4O_TRANSCRIBE -> Known.GPT_4O_TRANSCRIBE
GPT_4O_MINI_TRANSCRIBE -> Known.GPT_4O_MINI_TRANSCRIBE
+ GPT_4O_MINI_TRANSCRIBE_2025_12_15 -> Known.GPT_4O_MINI_TRANSCRIBE_2025_12_15
GPT_4O_TRANSCRIBE_DIARIZE -> Known.GPT_4O_TRANSCRIBE_DIARIZE
else -> throw OpenAIInvalidDataException("Unknown AudioModel: $value")
}
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/audio/speech/SpeechCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/audio/speech/SpeechCreateParams.kt
index 0c2a99a4..9df7f0a9 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/audio/speech/SpeechCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/audio/speech/SpeechCreateParams.kt
@@ -39,7 +39,7 @@ private constructor(
/**
* One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1`,
- * `tts-1-hd` or `gpt-4o-mini-tts`.
+ * `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type or is
* unexpectedly missing or null (e.g. if the server responded with an unexpected value).
@@ -206,7 +206,7 @@ private constructor(
/**
* One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1`,
- * `tts-1-hd` or `gpt-4o-mini-tts`.
+ * `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.
*/
fun model(model: SpeechModel) = apply { body.model(model) }
@@ -512,7 +512,7 @@ private constructor(
/**
* One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1`,
- * `tts-1-hd` or `gpt-4o-mini-tts`.
+ * `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type or is
* unexpectedly missing or null (e.g. if the server responded with an unexpected value).
@@ -690,7 +690,7 @@ private constructor(
/**
* One of the available [TTS models](https://platform.openai.com/docs/models#tts):
- * `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+ * `tts-1`, `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.
*/
fun model(model: SpeechModel) = model(JsonField.of(model))
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/audio/speech/SpeechModel.kt b/openai-java-core/src/main/kotlin/com/openai/models/audio/speech/SpeechModel.kt
index de6a0324..fb2a2304 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/audio/speech/SpeechModel.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/audio/speech/SpeechModel.kt
@@ -26,6 +26,8 @@ class SpeechModel @JsonCreator private constructor(private val value: JsonField<
@JvmField val GPT_4O_MINI_TTS = of("gpt-4o-mini-tts")
+ @JvmField val GPT_4O_MINI_TTS_2025_12_15 = of("gpt-4o-mini-tts-2025-12-15")
+
@JvmStatic fun of(value: String) = SpeechModel(JsonField.of(value))
}
@@ -34,6 +36,7 @@ class SpeechModel @JsonCreator private constructor(private val value: JsonField<
TTS_1,
TTS_1_HD,
GPT_4O_MINI_TTS,
+ GPT_4O_MINI_TTS_2025_12_15,
}
/**
@@ -49,6 +52,7 @@ class SpeechModel @JsonCreator private constructor(private val value: JsonField<
TTS_1,
TTS_1_HD,
GPT_4O_MINI_TTS,
+ GPT_4O_MINI_TTS_2025_12_15,
/** An enum member indicating that [SpeechModel] was instantiated with an unknown value. */
_UNKNOWN,
}
@@ -65,6 +69,7 @@ class SpeechModel @JsonCreator private constructor(private val value: JsonField<
TTS_1 -> Value.TTS_1
TTS_1_HD -> Value.TTS_1_HD
GPT_4O_MINI_TTS -> Value.GPT_4O_MINI_TTS
+ GPT_4O_MINI_TTS_2025_12_15 -> Value.GPT_4O_MINI_TTS_2025_12_15
else -> Value._UNKNOWN
}
@@ -81,6 +86,7 @@ class SpeechModel @JsonCreator private constructor(private val value: JsonField<
TTS_1 -> Known.TTS_1
TTS_1_HD -> Known.TTS_1_HD
GPT_4O_MINI_TTS -> Known.GPT_4O_MINI_TTS
+ GPT_4O_MINI_TTS_2025_12_15 -> Known.GPT_4O_MINI_TTS_2025_12_15
else -> throw OpenAIInvalidDataException("Unknown SpeechModel: $value")
}
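The new `gpt-4o-mini-tts-2025-12-15` slug is exposed as a `SpeechModel` constant, and `SpeechModel.of(...)` still accepts arbitrary strings. A minimal sketch of selecting it, assuming the SDK's usual `OpenAIOkHttpClient.fromEnv()` entry point plus the `input`/`voice` builder methods and the `audio().speech().create(...)` call, none of which are part of this diff:

```kotlin
import com.openai.client.okhttp.OpenAIOkHttpClient
import com.openai.models.audio.speech.SpeechCreateParams
import com.openai.models.audio.speech.SpeechModel

fun main() {
    // Assumed entry point: reads OPENAI_API_KEY from the environment.
    val client = OpenAIOkHttpClient.fromEnv()

    val params = SpeechCreateParams.builder()
        // New dated slug added in this release; SpeechModel.of("gpt-4o-mini-tts-2025-12-15")
        // is equivalent, per the "make all `model` params accept strings" change.
        .model(SpeechModel.GPT_4O_MINI_TTS_2025_12_15)
        .input("Hello from openai-java 4.13.0.")
        .voice(SpeechCreateParams.Voice.ALLOY) // assumed voice constant; check the SDK for the exact type
        .build()

    // Assumed call shape; the response body carries the generated audio bytes.
    client.audio().speech().create(params)
}
```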
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/audio/transcriptions/TranscriptionCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/audio/transcriptions/TranscriptionCreateParams.kt
index 5bfdbf7d..ebccd5c0 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/audio/transcriptions/TranscriptionCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/audio/transcriptions/TranscriptionCreateParams.kt
@@ -53,8 +53,8 @@ private constructor(
/**
* ID of the model to use. The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`,
- * `whisper-1` (which is powered by our open source Whisper V2 model), and
- * `gpt-4o-transcribe-diarize`.
+ * `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1` (which is powered by our open source Whisper
+ * V2 model), and `gpt-4o-transcribe-diarize`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type or is
* unexpectedly missing or null (e.g. if the server responded with an unexpected value).
@@ -77,8 +77,9 @@ private constructor(
* Additional information to include in the transcription response. `logprobs` will return the
* log probabilities of the tokens in the response to understand the model's confidence in the
* transcription. `logprobs` only works with response_format set to `json` and only with the
- * models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`. This field is not supported when
- * using `gpt-4o-transcribe-diarize`.
+ * models `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, and
+ * `gpt-4o-mini-transcribe-2025-12-15`. This field is not supported when using
+ * `gpt-4o-transcribe-diarize`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -328,8 +329,8 @@ private constructor(
/**
* ID of the model to use. The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`,
- * `whisper-1` (which is powered by our open source Whisper V2 model), and
- * `gpt-4o-transcribe-diarize`.
+ * `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1` (which is powered by our open source
+ * Whisper V2 model), and `gpt-4o-transcribe-diarize`.
*/
fun model(model: AudioModel) = apply { body.model(model) }
@@ -388,8 +389,9 @@ private constructor(
* Additional information to include in the transcription response. `logprobs` will return
* the log probabilities of the tokens in the response to understand the model's confidence
* in the transcription. `logprobs` only works with response_format set to `json` and only
- * with the models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`. This field is not
- * supported when using `gpt-4o-transcribe-diarize`.
+ * with the models `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, and
+ * `gpt-4o-mini-transcribe-2025-12-15`. This field is not supported when using
+ * `gpt-4o-transcribe-diarize`.
*/
fun include(include: List<TranscriptionInclude>) = apply { body.include(include) }
@@ -759,8 +761,8 @@ private constructor(
/**
* ID of the model to use. The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`,
- * `whisper-1` (which is powered by our open source Whisper V2 model), and
- * `gpt-4o-transcribe-diarize`.
+ * `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1` (which is powered by our open source
+ * Whisper V2 model), and `gpt-4o-transcribe-diarize`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type or is
* unexpectedly missing or null (e.g. if the server responded with an unexpected value).
@@ -784,8 +786,9 @@ private constructor(
* Additional information to include in the transcription response. `logprobs` will return
* the log probabilities of the tokens in the response to understand the model's confidence
* in the transcription. `logprobs` only works with response_format set to `json` and only
- * with the models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`. This field is not
- * supported when using `gpt-4o-transcribe-diarize`.
+ * with the models `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, and
+ * `gpt-4o-mini-transcribe-2025-12-15`. This field is not supported when using
+ * `gpt-4o-transcribe-diarize`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -1069,8 +1072,8 @@ private constructor(
/**
* ID of the model to use. The options are `gpt-4o-transcribe`,
- * `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source Whisper V2
- * model), and `gpt-4o-transcribe-diarize`.
+ * `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1` (which is
+ * powered by our open source Whisper V2 model), and `gpt-4o-transcribe-diarize`.
*/
fun model(model: AudioModel) = model(MultipartField.of(model))
@@ -1132,8 +1135,9 @@ private constructor(
* Additional information to include in the transcription response. `logprobs` will
* return the log probabilities of the tokens in the response to understand the model's
* confidence in the transcription. `logprobs` only works with response_format set to
- * `json` and only with the models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`.
- * This field is not supported when using `gpt-4o-transcribe-diarize`.
+ * `json` and only with the models `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, and
+ * `gpt-4o-mini-transcribe-2025-12-15`. This field is not supported when using
+ * `gpt-4o-transcribe-diarize`.
*/
fun include(include: List<TranscriptionInclude>) = include(MultipartField.of(include))
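Similarly, the dated transcribe slug is available as an `AudioModel` constant and can be passed to `TranscriptionCreateParams`. A minimal sketch, assuming the usual client entry point, a `file(Path)` overload for the multipart file field, and the `audio().transcriptions().create(...)` call (all assumed, not shown in this diff):

```kotlin
import com.openai.client.okhttp.OpenAIOkHttpClient
import com.openai.models.audio.AudioModel
import com.openai.models.audio.transcriptions.TranscriptionCreateParams
import java.nio.file.Paths

fun main() {
    // Assumed entry point: reads OPENAI_API_KEY from the environment.
    val client = OpenAIOkHttpClient.fromEnv()

    val params = TranscriptionCreateParams.builder()
        .file(Paths.get("meeting.mp3")) // assumed Path overload for the multipart file field
        // New dated slug; AudioModel.of("gpt-4o-mini-transcribe-2025-12-15") also works,
        // since model params accept plain strings as of this release.
        .model(AudioModel.GPT_4O_MINI_TRANSCRIBE_2025_12_15)
        .build()

    // Assumed call shape; the response is the transcription result.
    val transcription = client.audio().transcriptions().create(params)
    println(transcription)
}
```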
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/images/Image.kt b/openai-java-core/src/main/kotlin/com/openai/models/images/Image.kt
index 54007ebd..e3192cbc 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/images/Image.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/images/Image.kt
@@ -35,8 +35,8 @@ private constructor(
) : this(b64Json, revisedPrompt, url, mutableMapOf())
/**
- * The base64-encoded JSON of the generated image. Default value for `gpt-image-1`, and only
- * present if `response_format` is set to `b64_json` for `dall-e-2` and `dall-e-3`.
+ * The base64-encoded JSON of the generated image. Returned by default for the GPT image models,
+ * and only present if `response_format` is set to `b64_json` for `dall-e-2` and `dall-e-3`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -53,7 +53,7 @@ private constructor(
/**
* When using `dall-e-2` or `dall-e-3`, the URL of the generated image if `response_format` is
- * set to `url` (default value). Unsupported for `gpt-image-1`.
+ * set to `url` (default value). Unsupported for the GPT image models.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -118,8 +118,9 @@ private constructor(
}
/**
- * The base64-encoded JSON of the generated image. Default value for `gpt-image-1`, and only
- * present if `response_format` is set to `b64_json` for `dall-e-2` and `dall-e-3`.
+ * The base64-encoded JSON of the generated image. Returned by default for the GPT image
+ * models, and only present if `response_format` is set to `b64_json` for `dall-e-2` and
+ * `dall-e-3`.
*/
fun b64Json(b64Json: String) = b64Json(JsonField.of(b64Json))
@@ -147,7 +148,7 @@ private constructor(
/**
* When using `dall-e-2` or `dall-e-3`, the URL of the generated image if `response_format`
- * is set to `url` (default value). Unsupported for `gpt-image-1`.
+ * is set to `url` (default value). Unsupported for the GPT image models.
*/
fun url(url: String) = url(JsonField.of(url))
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/images/ImageEditCompletedEvent.kt b/openai-java-core/src/main/kotlin/com/openai/models/images/ImageEditCompletedEvent.kt
index 2a9626a2..ebc631d0 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/images/ImageEditCompletedEvent.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/images/ImageEditCompletedEvent.kt
@@ -120,7 +120,7 @@ private constructor(
@JsonProperty("type") @ExcludeMissing fun _type(): JsonValue = type
/**
- * For `gpt-image-1` only, the token usage information for the image generation.
+ * For the GPT image models only, the token usage information for the image generation.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type or is
* unexpectedly missing or null (e.g. if the server responded with an unexpected value).
@@ -321,7 +321,7 @@ private constructor(
*/
fun type(type: JsonValue) = apply { this.type = type }
- /** For `gpt-image-1` only, the token usage information for the image generation. */
+ /** For the GPT image models only, the token usage information for the image generation. */
fun usage(usage: Usage) = usage(JsonField.of(usage))
/**
@@ -974,7 +974,7 @@ private constructor(
override fun toString() = value.toString()
}
- /** For `gpt-image-1` only, the token usage information for the image generation. */
+ /** For the GPT image models only, the token usage information for the image generation. */
class Usage
@JsonCreator(mode = JsonCreator.Mode.DISABLED)
private constructor(
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/images/ImageEditParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/images/ImageEditParams.kt
index e0612f18..7f53e811 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/images/ImageEditParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/images/ImageEditParams.kt
@@ -45,8 +45,8 @@ private constructor(
/**
* The image(s) to edit. Must be a supported image file or an array of images.
*
- * For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than 50MB. You
- * can provide up to 16 images.
+ * For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and `gpt-image-1.5`), each image
+ * should be a `png`, `webp`, or `jpg` file less than 50MB. You can provide up to 16 images.
*
* For `dall-e-2`, you can only provide one image, and it should be a square `png` file less
* than 4MB.
@@ -58,7 +58,7 @@ private constructor(
/**
* A text description of the desired image(s). The maximum length is 1000 characters for
- * `dall-e-2`, and 32000 characters for `gpt-image-1`.
+ * `dall-e-2`, and 32000 characters for the GPT image models.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type or is
* unexpectedly missing or null (e.g. if the server responded with an unexpected value).
@@ -67,9 +67,9 @@ private constructor(
/**
* Allows to set transparency for the background of the generated image(s). This parameter is
- * only supported for `gpt-image-1`. Must be one of `transparent`, `opaque` or `auto` (default
- * value). When `auto` is used, the model will automatically determine the best background for
- * the image.
+ * only supported for the GPT image models. Must be one of `transparent`, `opaque` or `auto`
+ * (default value). When `auto` is used, the model will automatically determine the best
+ * background for the image.
*
* If `transparent`, the output format needs to support transparency, so it should be set to
* either `png` (default value) or `webp`.
@@ -101,8 +101,9 @@ private constructor(
fun mask(): Optional = body.mask()
/**
- * The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are supported.
- * Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` is used.
+ * The model to use for image generation. Only `dall-e-2` and the GPT image models are
+ * supported. Defaults to `dall-e-2` unless a parameter specific to the GPT image models is
+ * used.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -119,7 +120,7 @@ private constructor(
/**
* The compression level (0-100%) for the generated images. This parameter is only supported for
- * `gpt-image-1` with the `webp` or `jpeg` output formats, and defaults to 100.
+ * the GPT image models with the `webp` or `jpeg` output formats, and defaults to 100.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -128,7 +129,7 @@ private constructor(
/**
* The format in which the generated images are returned. This parameter is only supported for
- * `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The default value is `png`.
+ * the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The default value is `png`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -150,7 +151,8 @@ private constructor(
/**
* The quality of the image that will be generated. `high`, `medium` and `low` are only
- * supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. Defaults to `auto`.
+ * supported for the GPT image models. `dall-e-2` only supports `standard` quality. Defaults to
+ * `auto`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -160,7 +162,7 @@ private constructor(
/**
* The format in which the generated images are returned. Must be one of `url` or `b64_json`.
* URLs are only valid for 60 minutes after the image has been generated. This parameter is only
- * supported for `dall-e-2`, as `gpt-image-1` will always return base64-encoded images.
+ * supported for `dall-e-2`, as the GPT image models always return base64-encoded images.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -169,8 +171,8 @@ private constructor(
/**
* The size of the generated images. Must be one of `1024x1024`, `1536x1024` (landscape),
- * `1024x1536` (portrait), or `auto` (default value) for `gpt-image-1`, and one of `256x256`,
- * `512x512`, or `1024x1024` for `dall-e-2`.
+ * `1024x1536` (portrait), or `auto` (default value) for the GPT image models, and one of
+ * `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -345,8 +347,9 @@ private constructor(
/**
* The image(s) to edit. Must be a supported image file or an array of images.
*
- * For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than 50MB.
- * You can provide up to 16 images.
+ * For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and `gpt-image-1.5`), each
+ * image should be a `png`, `webp`, or `jpg` file less than 50MB. You can provide up to 16
+ * images.
*
* For `dall-e-2`, you can only provide one image, and it should be a square `png` file less
* than 4MB.
@@ -367,8 +370,9 @@ private constructor(
/**
* The image(s) to edit. Must be a supported image file or an array of images.
*
- * For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than 50MB.
- * You can provide up to 16 images.
+ * For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and `gpt-image-1.5`), each
+ * image should be a `png`, `webp`, or `jpg` file less than 50MB. You can provide up to 16
+ * images.
*
* For `dall-e-2`, you can only provide one image, and it should be a square `png` file less
* than 4MB.
@@ -378,8 +382,9 @@ private constructor(
/**
* The image(s) to edit. Must be a supported image file or an array of images.
*
- * For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than 50MB.
- * You can provide up to 16 images.
+ * For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and `gpt-image-1.5`), each
+ * image should be a `png`, `webp`, or `jpg` file less than 50MB. You can provide up to 16
+ * images.
*
* For `dall-e-2`, you can only provide one image, and it should be a square `png` file less
* than 4MB.
@@ -393,7 +398,7 @@ private constructor(
/**
* A text description of the desired image(s). The maximum length is 1000 characters for
- * `dall-e-2`, and 32000 characters for `gpt-image-1`.
+ * `dall-e-2`, and 32000 characters for the GPT image models.
*/
fun prompt(prompt: String) = apply { body.prompt(prompt) }
@@ -407,9 +412,9 @@ private constructor(
/**
* Allows to set transparency for the background of the generated image(s). This parameter
- * is only supported for `gpt-image-1`. Must be one of `transparent`, `opaque` or `auto`
- * (default value). When `auto` is used, the model will automatically determine the best
- * background for the image.
+ * is only supported for the GPT image models. Must be one of `transparent`, `opaque` or
+ * `auto` (default value). When `auto` is used, the model will automatically determine the
+ * best background for the image.
*
* If `transparent`, the output format needs to support transparency, so it should be set to
* either `png` (default value) or `webp`.
@@ -488,8 +493,9 @@ private constructor(
fun mask(path: Path) = apply { body.mask(path) }
/**
- * The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are supported.
- * Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` is used.
+ * The model to use for image generation. Only `dall-e-2` and the GPT image models are
+ * supported. Defaults to `dall-e-2` unless a parameter specific to the GPT image models is
+ * used.
*/
fun model(model: ImageModel?) = apply { body.model(model) }
@@ -536,7 +542,7 @@ private constructor(
/**
* The compression level (0-100%) for the generated images. This parameter is only supported
- * for `gpt-image-1` with the `webp` or `jpeg` output formats, and defaults to 100.
+ * for the GPT image models with the `webp` or `jpeg` output formats, and defaults to 100.
*/
fun outputCompression(outputCompression: Long?) = apply {
body.outputCompression(outputCompression)
@@ -567,7 +573,8 @@ private constructor(
/**
* The format in which the generated images are returned. This parameter is only supported
- * for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The default value is `png`.
+ * for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The default value is
+ * `png`.
*/
fun outputFormat(outputFormat: OutputFormat?) = apply { body.outputFormat(outputFormat) }
@@ -619,8 +626,8 @@ private constructor(
/**
* The quality of the image that will be generated. `high`, `medium` and `low` are only
- * supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. Defaults to
- * `auto`.
+ * supported for the GPT image models. `dall-e-2` only supports `standard` quality. Defaults
+ * to `auto`.
*/
fun quality(quality: Quality?) = apply { body.quality(quality) }
@@ -638,7 +645,7 @@ private constructor(
/**
* The format in which the generated images are returned. Must be one of `url` or
* `b64_json`. URLs are only valid for 60 minutes after the image has been generated. This
- * parameter is only supported for `dall-e-2`, as `gpt-image-1` will always return
+ * parameter is only supported for `dall-e-2`, as the GPT image models always return
* base64-encoded images.
*/
fun responseFormat(responseFormat: ResponseFormat?) = apply {
@@ -662,7 +669,7 @@ private constructor(
/**
* The size of the generated images. Must be one of `1024x1024`, `1536x1024` (landscape),
- * `1024x1536` (portrait), or `auto` (default value) for `gpt-image-1`, and one of
+ * `1024x1536` (portrait), or `auto` (default value) for the GPT image models, and one of
* `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
*/
fun size(size: Size?) = apply { body.size(size) }
@@ -872,8 +879,9 @@ private constructor(
/**
* The image(s) to edit. Must be a supported image file or an array of images.
*
- * For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than 50MB.
- * You can provide up to 16 images.
+ * For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and `gpt-image-1.5`), each
+ * image should be a `png`, `webp`, or `jpg` file less than 50MB. You can provide up to 16
+ * images.
*
* For `dall-e-2`, you can only provide one image, and it should be a square `png` file less
* than 4MB.
@@ -885,7 +893,7 @@ private constructor(
/**
* A text description of the desired image(s). The maximum length is 1000 characters for
- * `dall-e-2`, and 32000 characters for `gpt-image-1`.
+ * `dall-e-2`, and 32000 characters for the GPT image models.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type or is
* unexpectedly missing or null (e.g. if the server responded with an unexpected value).
@@ -894,9 +902,9 @@ private constructor(
/**
* Allows to set transparency for the background of the generated image(s). This parameter
- * is only supported for `gpt-image-1`. Must be one of `transparent`, `opaque` or `auto`
- * (default value). When `auto` is used, the model will automatically determine the best
- * background for the image.
+ * is only supported for the GPT image models. Must be one of `transparent`, `opaque` or
+ * `auto` (default value). When `auto` is used, the model will automatically determine the
+ * best background for the image.
*
* If `transparent`, the output format needs to support transparency, so it should be set to
* either `png` (default value) or `webp`.
@@ -929,8 +937,9 @@ private constructor(
fun mask(): Optional = mask.value.getOptional("mask")
/**
- * The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are supported.
- * Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` is used.
+ * The model to use for image generation. Only `dall-e-2` and the GPT image models are
+ * supported. Defaults to `dall-e-2` unless a parameter specific to the GPT image models is
+ * used.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -947,7 +956,7 @@ private constructor(
/**
* The compression level (0-100%) for the generated images. This parameter is only supported
- * for `gpt-image-1` with the `webp` or `jpeg` output formats, and defaults to 100.
+ * for the GPT image models with the `webp` or `jpeg` output formats, and defaults to 100.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -957,7 +966,8 @@ private constructor(
/**
* The format in which the generated images are returned. This parameter is only supported
- * for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The default value is `png`.
+ * for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The default value is
+ * `png`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -979,8 +989,8 @@ private constructor(
/**
* The quality of the image that will be generated. `high`, `medium` and `low` are only
- * supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. Defaults to
- * `auto`.
+ * supported for the GPT image models. `dall-e-2` only supports `standard` quality. Defaults
+ * to `auto`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -990,7 +1000,7 @@ private constructor(
/**
* The format in which the generated images are returned. Must be one of `url` or
* `b64_json`. URLs are only valid for 60 minutes after the image has been generated. This
- * parameter is only supported for `dall-e-2`, as `gpt-image-1` will always return
+ * parameter is only supported for `dall-e-2`, as the GPT image models always return
* base64-encoded images.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
@@ -1001,7 +1011,7 @@ private constructor(
/**
* The size of the generated images. Must be one of `1024x1024`, `1536x1024` (landscape),
- * `1024x1536` (portrait), or `auto` (default value) for `gpt-image-1`, and one of
+ * `1024x1536` (portrait), or `auto` (default value) for the GPT image models, and one of
* `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
@@ -1203,8 +1213,9 @@ private constructor(
/**
* The image(s) to edit. Must be a supported image file or an array of images.
*
- * For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
- * 50MB. You can provide up to 16 images.
+ * For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and `gpt-image-1.5`),
+ * each image should be a `png`, `webp`, or `jpg` file less than 50MB. You can provide
+ * up to 16 images.
*
* For `dall-e-2`, you can only provide one image, and it should be a square `png` file
* less than 4MB.
@@ -1232,8 +1243,9 @@ private constructor(
/**
* The image(s) to edit. Must be a supported image file or an array of images.
*
- * For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
- * 50MB. You can provide up to 16 images.
+ * For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and `gpt-image-1.5`),
+ * each image should be a `png`, `webp`, or `jpg` file less than 50MB. You can provide
+ * up to 16 images.
*
* For `dall-e-2`, you can only provide one image, and it should be a square `png` file
* less than 4MB.
@@ -1243,8 +1255,9 @@ private constructor(
/**
* The image(s) to edit. Must be a supported image file or an array of images.
*
- * For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
- * 50MB. You can provide up to 16 images.
+ * For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and `gpt-image-1.5`),
+ * each image should be a `png`, `webp`, or `jpg` file less than 50MB. You can provide
+ * up to 16 images.
*
* For `dall-e-2`, you can only provide one image, and it should be a square `png` file
* less than 4MB.
@@ -1264,7 +1277,7 @@ private constructor(
/**
* A text description of the desired image(s). The maximum length is 1000 characters for
- * `dall-e-2`, and 32000 characters for `gpt-image-1`.
+ * `dall-e-2`, and 32000 characters for the GPT image models.
*/
fun prompt(prompt: String) = prompt(MultipartField.of(prompt))
@@ -1279,8 +1292,8 @@ private constructor(
/**
* Allows to set transparency for the background of the generated image(s). This
- * parameter is only supported for `gpt-image-1`. Must be one of `transparent`, `opaque`
- * or `auto` (default value). When `auto` is used, the model will automatically
+ * parameter is only supported for the GPT image models. Must be one of `transparent`,
+ * `opaque` or `auto` (default value). When `auto` is used, the model will automatically
* determine the best background for the image.
*
* If `transparent`, the output format needs to support transparency, so it should be
@@ -1366,9 +1379,9 @@ private constructor(
)
/**
- * The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
- * supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` is
- * used.
+ * The model to use for image generation. Only `dall-e-2` and the GPT image models are
+ * supported. Defaults to `dall-e-2` unless a parameter specific to the GPT image models
+ * is used.
*/
fun model(model: ImageModel?) = model(MultipartField.of(model))
@@ -1417,8 +1430,8 @@ private constructor(
/**
* The compression level (0-100%) for the generated images. This parameter is only
- * supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and defaults
- * to 100.
+ * supported for the GPT image models with the `webp` or `jpeg` output formats, and
+ * defaults to 100.
*/
fun outputCompression(outputCompression: Long?) =
outputCompression(MultipartField.of(outputCompression))
@@ -1450,8 +1463,8 @@ private constructor(
/**
* The format in which the generated images are returned. This parameter is only
- * supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The default
- * value is `png`.
+ * supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The
+ * default value is `png`.
*/
fun outputFormat(outputFormat: OutputFormat?) =
outputFormat(MultipartField.of(outputFormat))
@@ -1506,8 +1519,8 @@ private constructor(
/**
* The quality of the image that will be generated. `high`, `medium` and `low` are only
- * supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. Defaults to
- * `auto`.
+ * supported for the GPT image models. `dall-e-2` only supports `standard` quality.
+ * Defaults to `auto`.
*/
fun quality(quality: Quality?) = quality(MultipartField.of(quality))
@@ -1526,8 +1539,8 @@ private constructor(
/**
* The format in which the generated images are returned. Must be one of `url` or
* `b64_json`. URLs are only valid for 60 minutes after the image has been generated.
- * This parameter is only supported for `dall-e-2`, as `gpt-image-1` will always return
- * base64-encoded images.
+ * This parameter is only supported for `dall-e-2`, as the GPT image models always
+ * return base64-encoded images.
*/
fun responseFormat(responseFormat: ResponseFormat?) =
responseFormat(MultipartField.of(responseFormat))
@@ -1549,8 +1562,8 @@ private constructor(
/**
* The size of the generated images. Must be one of `1024x1024`, `1536x1024`
- * (landscape), `1024x1536` (portrait), or `auto` (default value) for `gpt-image-1`, and
- * one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
+ * (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
+ * models, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
*/
fun size(size: Size?) = size(MultipartField.of(size))
@@ -1718,8 +1731,8 @@ private constructor(
/**
* The image(s) to edit. Must be a supported image file or an array of images.
*
- * For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than 50MB. You
- * can provide up to 16 images.
+ * For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and `gpt-image-1.5`), each image
+ * should be a `png`, `webp`, or `jpg` file less than 50MB. You can provide up to 16 images.
*
* For `dall-e-2`, you can only provide one image, and it should be a square `png` file less
* than 4MB.
@@ -1849,9 +1862,9 @@ private constructor(
/**
* Allows to set transparency for the background of the generated image(s). This parameter is
- * only supported for `gpt-image-1`. Must be one of `transparent`, `opaque` or `auto` (default
- * value). When `auto` is used, the model will automatically determine the best background for
- * the image.
+ * only supported for the GPT image models. Must be one of `transparent`, `opaque` or `auto`
+ * (default value). When `auto` is used, the model will automatically determine the best
+ * background for the image.
*
* If `transparent`, the output format needs to support transparency, so it should be set to
* either `png` (default value) or `webp`.
@@ -2125,7 +2138,7 @@ private constructor(
/**
* The format in which the generated images are returned. This parameter is only supported for
- * `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The default value is `png`.
+ * the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The default value is `png`.
*/
class OutputFormat @JsonCreator private constructor(private val value: JsonField<String>) :
Enum<OutputFormat> {
@@ -2263,7 +2276,8 @@ private constructor(
/**
* The quality of the image that will be generated. `high`, `medium` and `low` are only
- * supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. Defaults to `auto`.
+ * supported for the GPT image models. `dall-e-2` only supports `standard` quality. Defaults to
+ * `auto`.
*/
class Quality @JsonCreator private constructor(private val value: JsonField<String>) : Enum<Quality> {
@@ -2411,7 +2425,7 @@ private constructor(
/**
* The format in which the generated images are returned. Must be one of `url` or `b64_json`.
* URLs are only valid for 60 minutes after the image has been generated. This parameter is only
- * supported for `dall-e-2`, as `gpt-image-1` will always return base64-encoded images.
+ * supported for `dall-e-2`, as the GPT image models always return base64-encoded images.
*/
class ResponseFormat @JsonCreator private constructor(private val value: JsonField<String>) :
Enum<ResponseFormat> {
@@ -2544,8 +2558,8 @@ private constructor(
/**
* The size of the generated images. Must be one of `1024x1024`, `1536x1024` (landscape),
- * `1024x1536` (portrait), or `auto` (default value) for `gpt-image-1`, and one of `256x256`,
- * `512x512`, or `1024x1024` for `dall-e-2`.
+ * `1024x1536` (portrait), or `auto` (default value) for the GPT image models, and one of
+ * `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
*/
class Size @JsonCreator private constructor(private val value: JsonField<String>) : Enum<Size> {
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/images/ImageGenCompletedEvent.kt b/openai-java-core/src/main/kotlin/com/openai/models/images/ImageGenCompletedEvent.kt
index 736f5256..cc119f3f 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/images/ImageGenCompletedEvent.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/images/ImageGenCompletedEvent.kt
@@ -120,7 +120,7 @@ private constructor(
@JsonProperty("type") @ExcludeMissing fun _type(): JsonValue = type
/**
- * For `gpt-image-1` only, the token usage information for the image generation.
+ * For the GPT image models only, the token usage information for the image generation.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type or is
* unexpectedly missing or null (e.g. if the server responded with an unexpected value).
@@ -321,7 +321,7 @@ private constructor(
*/
fun type(type: JsonValue) = apply { this.type = type }
- /** For `gpt-image-1` only, the token usage information for the image generation. */
+ /** For the GPT image models only, the token usage information for the image generation. */
fun usage(usage: Usage) = usage(JsonField.of(usage))
/**
@@ -974,7 +974,7 @@ private constructor(
override fun toString() = value.toString()
}
- /** For `gpt-image-1` only, the token usage information for the image generation. */
+ /** For the GPT image models only, the token usage information for the image generation. */
class Usage
@JsonCreator(mode = JsonCreator.Mode.DISABLED)
private constructor(
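Because `ImageGenerateParams.Builder.model(String)` accepts arbitrary strings (see the hunk below), the new `gpt-image-1.5` slug can be used without a dedicated constant. A minimal sketch, assuming the usual client entry point and the `images().generate(...)` call:

```kotlin
import com.openai.client.okhttp.OpenAIOkHttpClient
import com.openai.models.images.ImageGenerateParams

fun main() {
    // Assumed entry point: reads OPENAI_API_KEY from the environment.
    val client = OpenAIOkHttpClient.fromEnv()

    val params = ImageGenerateParams.builder()
        // The model param accepts plain strings, so new slugs such as "gpt-image-1.5"
        // work without waiting for a dedicated ImageModel constant.
        .model("gpt-image-1.5")
        .prompt("A watercolor painting of a lighthouse at dusk")
        .build()

    // Assumed call shape; GPT image models return base64-encoded image data rather than URLs.
    val response = client.images().generate(params)
    println(response)
}
```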
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/images/ImageGenerateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/images/ImageGenerateParams.kt
index 81b265cf..ad39d5f0 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/images/ImageGenerateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/images/ImageGenerateParams.kt
@@ -32,8 +32,8 @@ private constructor(
) : Params {
/**
- * A text description of the desired image(s). The maximum length is 32000 characters for
- * `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`.
+ * A text description of the desired image(s). The maximum length is 32000 characters for the
+ * GPT image models, 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type or is
* unexpectedly missing or null (e.g. if the server responded with an unexpected value).
@@ -42,9 +42,9 @@ private constructor(
/**
* Allows to set transparency for the background of the generated image(s). This parameter is
- * only supported for `gpt-image-1`. Must be one of `transparent`, `opaque` or `auto` (default
- * value). When `auto` is used, the model will automatically determine the best background for
- * the image.
+ * only supported for the GPT image models. Must be one of `transparent`, `opaque` or `auto`
+ * (default value). When `auto` is used, the model will automatically determine the best
+ * background for the image.
*
* If `transparent`, the output format needs to support transparency, so it should be set to
* either `png` (default value) or `webp`.
@@ -55,8 +55,9 @@ private constructor(
fun background(): Optional = body.background()
/**
- * The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or `gpt-image-1`.
- * Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` is used.
+ * The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT image model
+ * (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to `dall-e-2` unless a
+ * parameter specific to the GPT image models is used.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -64,8 +65,8 @@ private constructor(
fun model(): Optional = body.model()
/**
- * Control the content-moderation level for images generated by `gpt-image-1`. Must be either
- * `low` for less restrictive filtering or `auto` (default value).
+ * Control the content-moderation level for images generated by the GPT image models. Must be
+ * either `low` for less restrictive filtering or `auto` (default value).
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -83,7 +84,7 @@ private constructor(
/**
* The compression level (0-100%) for the generated images. This parameter is only supported for
- * `gpt-image-1` with the `webp` or `jpeg` output formats, and defaults to 100.
+ * the GPT image models with the `webp` or `jpeg` output formats, and defaults to 100.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -92,7 +93,7 @@ private constructor(
/**
* The format in which the generated images are returned. This parameter is only supported for
- * `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
+ * the GPT image models. Must be one of `png`, `jpeg`, or `webp`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -115,7 +116,7 @@ private constructor(
/**
* The quality of the image that will be generated.
* - `auto` (default value) will automatically select the best quality for the given model.
- * - `high`, `medium` and `low` are supported for `gpt-image-1`.
+ * - `high`, `medium` and `low` are supported for the GPT image models.
* - `hd` and `standard` are supported for `dall-e-3`.
* - `standard` is the only option for `dall-e-2`.
*
@@ -127,7 +128,7 @@ private constructor(
/**
* The format in which generated images with `dall-e-2` and `dall-e-3` are returned. Must be one
* of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been
- * generated. This parameter isn't supported for `gpt-image-1` which will always return
+ * generated. This parameter isn't supported for the GPT image models, which always return
* base64-encoded images.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
@@ -137,7 +138,7 @@ private constructor(
/**
* The size of the generated images. Must be one of `1024x1024`, `1536x1024` (landscape),
- * `1024x1536` (portrait), or `auto` (default value) for `gpt-image-1`, one of `256x256`,
+ * `1024x1536` (portrait), or `auto` (default value) for the GPT image models, one of `256x256`,
* `512x512`, or `1024x1024` for `dall-e-2`, and one of `1024x1024`, `1792x1024`, or `1024x1792`
* for `dall-e-3`.
*
@@ -312,7 +313,7 @@ private constructor(
/**
* A text description of the desired image(s). The maximum length is 32000 characters for
- * `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`.
+ * the GPT image models, 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`.
*/
fun prompt(prompt: String) = apply { body.prompt(prompt) }
@@ -326,9 +327,9 @@ private constructor(
/**
* Allows to set transparency for the background of the generated image(s). This parameter
- * is only supported for `gpt-image-1`. Must be one of `transparent`, `opaque` or `auto`
- * (default value). When `auto` is used, the model will automatically determine the best
- * background for the image.
+ * is only supported for the GPT image models. Must be one of `transparent`, `opaque` or
+ * `auto` (default value). When `auto` is used, the model will automatically determine the
+ * best background for the image.
*
* If `transparent`, the output format needs to support transparency, so it should be set to
* either `png` (default value) or `webp`.
@@ -348,8 +349,9 @@ private constructor(
fun background(background: JsonField) = apply { body.background(background) }
/**
- * The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or `gpt-image-1`.
- * Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` is used.
+ * The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT image
+ * model (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to `dall-e-2` unless
+ * a parameter specific to the GPT image models is used.
*/
fun model(model: ImageModel?) = apply { body.model(model) }
@@ -374,8 +376,8 @@ private constructor(
fun model(value: String) = apply { body.model(value) }
/**
- * Control the content-moderation level for images generated by `gpt-image-1`. Must be
- * either `low` for less restrictive filtering or `auto` (default value).
+ * Control the content-moderation level for images generated by the GPT image models. Must
+ * be either `low` for less restrictive filtering or `auto` (default value).
*/
fun moderation(moderation: Moderation?) = apply { body.moderation(moderation) }
@@ -417,7 +419,7 @@ private constructor(
/**
* The compression level (0-100%) for the generated images. This parameter is only supported
- * for `gpt-image-1` with the `webp` or `jpeg` output formats, and defaults to 100.
+ * for the GPT image models with the `webp` or `jpeg` output formats, and defaults to 100.
*/
fun outputCompression(outputCompression: Long?) = apply {
body.outputCompression(outputCompression)
@@ -448,7 +450,7 @@ private constructor(
/**
* The format in which the generated images are returned. This parameter is only supported
- * for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
+ * for the GPT image models. Must be one of `png`, `jpeg`, or `webp`.
*/
fun outputFormat(outputFormat: OutputFormat?) = apply { body.outputFormat(outputFormat) }
@@ -501,7 +503,7 @@ private constructor(
/**
* The quality of the image that will be generated.
* - `auto` (default value) will automatically select the best quality for the given model.
- * - `high`, `medium` and `low` are supported for `gpt-image-1`.
+ * - `high`, `medium` and `low` are supported for the GPT image models.
* - `hd` and `standard` are supported for `dall-e-3`.
* - `standard` is the only option for `dall-e-2`.
*/
@@ -521,7 +523,7 @@ private constructor(
/**
* The format in which generated images with `dall-e-2` and `dall-e-3` are returned. Must be
* one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been
- * generated. This parameter isn't supported for `gpt-image-1` which will always return
+ * generated. This parameter isn't supported for the GPT image models, which always return
* base64-encoded images.
*/
fun responseFormat(responseFormat: ResponseFormat?) = apply {
@@ -545,9 +547,9 @@ private constructor(
/**
* The size of the generated images. Must be one of `1024x1024`, `1536x1024` (landscape),
- * `1024x1536` (portrait), or `auto` (default value) for `gpt-image-1`, one of `256x256`,
- * `512x512`, or `1024x1024` for `dall-e-2`, and one of `1024x1024`, `1792x1024`, or
- * `1024x1792` for `dall-e-3`.
+ * `1024x1536` (portrait), or `auto` (default value) for the GPT image models, one of
+ * `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of `1024x1024`, `1792x1024`,
+ * or `1024x1792` for `dall-e-3`.
*/
fun size(size: Size?) = apply { body.size(size) }
@@ -804,7 +806,7 @@ private constructor(
/**
* A text description of the desired image(s). The maximum length is 32000 characters for
- * `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`.
+ * the GPT image models, 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type or is
* unexpectedly missing or null (e.g. if the server responded with an unexpected value).
@@ -813,9 +815,9 @@ private constructor(
/**
* Allows to set transparency for the background of the generated image(s). This parameter
- * is only supported for `gpt-image-1`. Must be one of `transparent`, `opaque` or `auto`
- * (default value). When `auto` is used, the model will automatically determine the best
- * background for the image.
+ * is only supported for the GPT image models. Must be one of `transparent`, `opaque` or
+ * `auto` (default value). When `auto` is used, the model will automatically determine the
+ * best background for the image.
*
* If `transparent`, the output format needs to support transparency, so it should be set to
* either `png` (default value) or `webp`.
@@ -826,8 +828,9 @@ private constructor(
fun background(): Optional = background.getOptional("background")
/**
- * The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or `gpt-image-1`.
- * Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` is used.
+ * The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT image
+ * model (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to `dall-e-2` unless
+ * a parameter specific to the GPT image models is used.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -835,8 +838,8 @@ private constructor(
fun model(): Optional = model.getOptional("model")
/**
- * Control the content-moderation level for images generated by `gpt-image-1`. Must be
- * either `low` for less restrictive filtering or `auto` (default value).
+ * Control the content-moderation level for images generated by the GPT image models. Must
+ * be either `low` for less restrictive filtering or `auto` (default value).
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -854,7 +857,7 @@ private constructor(
/**
* The compression level (0-100%) for the generated images. This parameter is only supported
- * for `gpt-image-1` with the `webp` or `jpeg` output formats, and defaults to 100.
+ * for the GPT image models with the `webp` or `jpeg` output formats, and defaults to 100.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -864,7 +867,7 @@ private constructor(
/**
* The format in which the generated images are returned. This parameter is only supported
- * for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
+ * for the GPT image models. Must be one of `png`, `jpeg`, or `webp`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -887,7 +890,7 @@ private constructor(
/**
* The quality of the image that will be generated.
* - `auto` (default value) will automatically select the best quality for the given model.
- * - `high`, `medium` and `low` are supported for `gpt-image-1`.
+ * - `high`, `medium` and `low` are supported for the GPT image models.
* - `hd` and `standard` are supported for `dall-e-3`.
* - `standard` is the only option for `dall-e-2`.
*
@@ -899,7 +902,7 @@ private constructor(
/**
* The format in which generated images with `dall-e-2` and `dall-e-3` are returned. Must be
* one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been
- * generated. This parameter isn't supported for `gpt-image-1` which will always return
+ * generated. This parameter isn't supported for the GPT image models, which always return
* base64-encoded images.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
@@ -910,9 +913,9 @@ private constructor(
/**
* The size of the generated images. Must be one of `1024x1024`, `1536x1024` (landscape),
- * `1024x1536` (portrait), or `auto` (default value) for `gpt-image-1`, one of `256x256`,
- * `512x512`, or `1024x1024` for `dall-e-2`, and one of `1024x1024`, `1792x1024`, or
- * `1024x1792` for `dall-e-3`.
+ * `1024x1536` (portrait), or `auto` (default value) for the GPT image models, one of
+ * `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of `1024x1024`, `1792x1024`,
+ * or `1024x1792` for `dall-e-3`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -1110,7 +1113,8 @@ private constructor(
/**
* A text description of the desired image(s). The maximum length is 32000 characters
- * for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`.
+ * for the GPT image models, 1000 characters for `dall-e-2` and 4000 characters for
+ * `dall-e-3`.
*/
fun prompt(prompt: String) = prompt(JsonField.of(prompt))
@@ -1125,8 +1129,8 @@ private constructor(
/**
* Allows to set transparency for the background of the generated image(s). This
- * parameter is only supported for `gpt-image-1`. Must be one of `transparent`, `opaque`
- * or `auto` (default value). When `auto` is used, the model will automatically
+ * parameter is only supported for the GPT image models. Must be one of `transparent`,
+ * `opaque` or `auto` (default value). When `auto` is used, the model will automatically
* determine the best background for the image.
*
* If `transparent`, the output format needs to support transparency, so it should be
@@ -1149,9 +1153,9 @@ private constructor(
}
/**
- * The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
- * `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` is
- * used.
+ * The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT image
+ * model (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to `dall-e-2`
+ * unless a parameter specific to the GPT image models is used.
*/
fun model(model: ImageModel?) = model(JsonField.ofNullable(model))
@@ -1177,8 +1181,8 @@ private constructor(
fun model(value: String) = model(ImageModel.of(value))
/**
- * Control the content-moderation level for images generated by `gpt-image-1`. Must be
- * either `low` for less restrictive filtering or `auto` (default value).
+ * Control the content-moderation level for images generated by the GPT image models.
+ * Must be either `low` for less restrictive filtering or `auto` (default value).
*/
fun moderation(moderation: Moderation?) = moderation(JsonField.ofNullable(moderation))
@@ -1223,8 +1227,8 @@ private constructor(
/**
* The compression level (0-100%) for the generated images. This parameter is only
- * supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and defaults
- * to 100.
+ * supported for the GPT image models with the `webp` or `jpeg` output formats, and
+ * defaults to 100.
*/
fun outputCompression(outputCompression: Long?) =
outputCompression(JsonField.ofNullable(outputCompression))
@@ -1256,7 +1260,7 @@ private constructor(
/**
* The format in which the generated images are returned. This parameter is only
- * supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
+ * supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`.
*/
fun outputFormat(outputFormat: OutputFormat?) =
outputFormat(JsonField.ofNullable(outputFormat))
@@ -1313,7 +1317,7 @@ private constructor(
* The quality of the image that will be generated.
* - `auto` (default value) will automatically select the best quality for the given
* model.
- * - `high`, `medium` and `low` are supported for `gpt-image-1`.
+ * - `high`, `medium` and `low` are supported for the GPT image models.
* - `hd` and `standard` are supported for `dall-e-3`.
* - `standard` is the only option for `dall-e-2`.
*/
@@ -1334,8 +1338,8 @@ private constructor(
/**
* The format in which generated images with `dall-e-2` and `dall-e-3` are returned.
* Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
- * image has been generated. This parameter isn't supported for `gpt-image-1` which will
- * always return base64-encoded images.
+ * image has been generated. This parameter isn't supported for the GPT image models,
+ * which always return base64-encoded images.
*/
fun responseFormat(responseFormat: ResponseFormat?) =
responseFormat(JsonField.ofNullable(responseFormat))
@@ -1357,9 +1361,9 @@ private constructor(
/**
* The size of the generated images. Must be one of `1024x1024`, `1536x1024`
- * (landscape), `1024x1536` (portrait), or `auto` (default value) for `gpt-image-1`, one
- * of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of `1024x1024`,
- * `1792x1024`, or `1024x1792` for `dall-e-3`.
+ * (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
+ * models, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of
+ * `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
*/
fun size(size: Size?) = size(JsonField.ofNullable(size))
@@ -1563,9 +1567,9 @@ private constructor(
/**
* Allows to set transparency for the background of the generated image(s). This parameter is
- * only supported for `gpt-image-1`. Must be one of `transparent`, `opaque` or `auto` (default
- * value). When `auto` is used, the model will automatically determine the best background for
- * the image.
+ * only supported for the GPT image models. Must be one of `transparent`, `opaque` or `auto`
+ * (default value). When `auto` is used, the model will automatically determine the best
+ * background for the image.
*
* If `transparent`, the output format needs to support transparency, so it should be set to
* either `png` (default value) or `webp`.
@@ -1704,8 +1708,8 @@ private constructor(
}
/**
- * Control the content-moderation level for images generated by `gpt-image-1`. Must be either
- * `low` for less restrictive filtering or `auto` (default value).
+ * Control the content-moderation level for images generated by the GPT image models. Must be
+ * either `low` for less restrictive filtering or `auto` (default value).
*/
class Moderation @JsonCreator private constructor(private val value: JsonField<String>) : Enum {
@@ -1836,7 +1840,7 @@ private constructor(
/**
* The format in which the generated images are returned. This parameter is only supported for
- * `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
+ * the GPT image models. Must be one of `png`, `jpeg`, or `webp`.
*/
class OutputFormat @JsonCreator private constructor(private val value: JsonField<String>) :
Enum {
@@ -1975,7 +1979,7 @@ private constructor(
/**
* The quality of the image that will be generated.
* - `auto` (default value) will automatically select the best quality for the given model.
- * - `high`, `medium` and `low` are supported for `gpt-image-1`.
+ * - `high`, `medium` and `low` are supported for the GPT image models.
* - `hd` and `standard` are supported for `dall-e-3`.
* - `standard` is the only option for `dall-e-2`.
*/
@@ -2131,7 +2135,7 @@ private constructor(
/**
* The format in which generated images with `dall-e-2` and `dall-e-3` are returned. Must be one
* of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been
- * generated. This parameter isn't supported for `gpt-image-1` which will always return
+ * generated. This parameter isn't supported for the GPT image models, which always return
* base64-encoded images.
*/
class ResponseFormat @JsonCreator private constructor(private val value: JsonField<String>) :
@@ -2265,7 +2269,7 @@ private constructor(
/**
* The size of the generated images. Must be one of `1024x1024`, `1536x1024` (landscape),
- * `1024x1536` (portrait), or `auto` (default value) for `gpt-image-1`, one of `256x256`,
+ * `1024x1536` (portrait), or `auto` (default value) for the GPT image models, one of `256x256`,
* `512x512`, or `1024x1024` for `dall-e-2`, and one of `1024x1024`, `1792x1024`, or `1024x1792`
* for `dall-e-3`.
*/
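
As a quick orientation for SDK users, the following is a minimal Java sketch of an image-generation call that exercises the new `gpt-image-1.5` slug via `ImageGenerateParams`; it assumes an `OPENAI_API_KEY` in the environment, and the prompt is illustrative.

```java
import com.openai.client.OpenAIClient;
import com.openai.client.okhttp.OpenAIOkHttpClient;
import com.openai.models.images.ImageGenerateParams;
import com.openai.models.images.ImageModel;
import com.openai.models.images.ImagesResponse;

public final class GptImage15Example {
    public static void main(String[] args) {
        // Reads OPENAI_API_KEY (and related settings) from the environment.
        OpenAIClient client = OpenAIOkHttpClient.fromEnv();

        ImageGenerateParams params = ImageGenerateParams.builder()
                .prompt("A cute baby sea otter")
                .model(ImageModel.GPT_IMAGE_1_5) // constant introduced in this release
                .build();

        // GPT image models always return base64-encoded images rather than URLs.
        ImagesResponse response = client.images().generate(params);
        response.data().ifPresent(images -> System.out.println(images.size() + " image(s) returned"));
    }
}
```
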
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/images/ImageModel.kt b/openai-java-core/src/main/kotlin/com/openai/models/images/ImageModel.kt
index 8d547ad1..f94c0c4d 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/images/ImageModel.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/images/ImageModel.kt
@@ -20,6 +20,8 @@ class ImageModel @JsonCreator private constructor(private val value: JsonField<String>) : Enum {
+ GPT_IMAGE_1_5 -> Value.GPT_IMAGE_1_5
DALL_E_2 -> Value.DALL_E_2
DALL_E_3 -> Value.DALL_E_3
GPT_IMAGE_1 -> Value.GPT_IMAGE_1
@@ -83,6 +88,7 @@ class ImageModel @JsonCreator private constructor(private val value: JsonField<String>) : Enum {
+ GPT_IMAGE_1_5 -> Known.GPT_IMAGE_1_5
DALL_E_2 -> Known.DALL_E_2
DALL_E_3 -> Known.DALL_E_3
GPT_IMAGE_1 -> Known.GPT_IMAGE_1
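
Since `ImageModel` is an open enum, the new slug can be referenced either through the generated constant added above or through the existing `of(String)` factory; a small sketch:

```java
import com.openai.models.images.ImageModel;

final class ImageModelSlugs {

    // Generated constant added in this release.
    static ImageModel fromConstant() {
        return ImageModel.GPT_IMAGE_1_5;
    }

    // The open-enum factory accepts any slug, including ones the SDK has not modeled yet.
    static ImageModel fromRawSlug() {
        return ImageModel.of("gpt-image-1.5");
    }
}
```
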
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/images/ImagesResponse.kt b/openai-java-core/src/main/kotlin/com/openai/models/images/ImagesResponse.kt
index 04ca5515..03ef7dc3 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/images/ImagesResponse.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/images/ImagesResponse.kt
@@ -924,6 +924,7 @@ private constructor(
private val inputTokensDetails: JsonField<InputTokensDetails>,
private val outputTokens: JsonField<Long>,
private val totalTokens: JsonField<Long>,
+ private val outputTokensDetails: JsonField<OutputTokensDetails>,
private val additionalProperties: MutableMap<String, JsonValue>,
) {
@@ -941,7 +942,17 @@ private constructor(
@JsonProperty("total_tokens")
@ExcludeMissing
totalTokens: JsonField<Long> = JsonMissing.of(),
- ) : this(inputTokens, inputTokensDetails, outputTokens, totalTokens, mutableMapOf())
+ @JsonProperty("output_tokens_details")
+ @ExcludeMissing
+ outputTokensDetails: JsonField<OutputTokensDetails> = JsonMissing.of(),
+ ) : this(
+ inputTokens,
+ inputTokensDetails,
+ outputTokens,
+ totalTokens,
+ outputTokensDetails,
+ mutableMapOf(),
+ )
/**
* The number of tokens (images and text) in the input prompt.
@@ -976,6 +987,15 @@ private constructor(
*/
fun totalTokens(): Long = totalTokens.getRequired("total_tokens")
+ /**
+ * The output token details for the image generation.
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
+ * server responded with an unexpected value).
+ */
+ fun outputTokensDetails(): Optional<OutputTokensDetails> =
+ outputTokensDetails.getOptional("output_tokens_details")
+
/**
* Returns the raw JSON value of [inputTokens].
*
@@ -1014,6 +1034,16 @@ private constructor(
@ExcludeMissing
fun _totalTokens(): JsonField<Long> = totalTokens
+ /**
+ * Returns the raw JSON value of [outputTokensDetails].
+ *
+ * Unlike [outputTokensDetails], this method doesn't throw if the JSON field has an
+ * unexpected type.
+ */
+ @JsonProperty("output_tokens_details")
+ @ExcludeMissing
+ fun _outputTokensDetails(): JsonField<OutputTokensDetails> = outputTokensDetails
+
@JsonAnySetter
private fun putAdditionalProperty(key: String, value: JsonValue) {
additionalProperties.put(key, value)
@@ -1049,6 +1079,7 @@ private constructor(
private var inputTokensDetails: JsonField<InputTokensDetails>? = null
private var outputTokens: JsonField<Long>? = null
private var totalTokens: JsonField<Long>? = null
+ private var outputTokensDetails: JsonField<OutputTokensDetails> = JsonMissing.of()
private var additionalProperties: MutableMap<String, JsonValue> = mutableMapOf()
@JvmSynthetic
@@ -1057,6 +1088,7 @@ private constructor(
inputTokensDetails = usage.inputTokensDetails
outputTokens = usage.outputTokens
totalTokens = usage.totalTokens
+ outputTokensDetails = usage.outputTokensDetails
additionalProperties = usage.additionalProperties.toMutableMap()
}
@@ -1113,6 +1145,21 @@ private constructor(
*/
fun totalTokens(totalTokens: JsonField<Long>) = apply { this.totalTokens = totalTokens }
+ /** The output token details for the image generation. */
+ fun outputTokensDetails(outputTokensDetails: OutputTokensDetails) =
+ outputTokensDetails(JsonField.of(outputTokensDetails))
+
+ /**
+ * Sets [Builder.outputTokensDetails] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.outputTokensDetails] with a well-typed
+ * [OutputTokensDetails] value instead. This method is primarily for setting the field
+ * to an undocumented or not yet supported value.
+ */
+ fun outputTokensDetails(outputTokensDetails: JsonField<OutputTokensDetails>) = apply {
+ this.outputTokensDetails = outputTokensDetails
+ }
+
fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
this.additionalProperties.clear()
putAllAdditionalProperties(additionalProperties)
@@ -1153,6 +1200,7 @@ private constructor(
checkRequired("inputTokensDetails", inputTokensDetails),
checkRequired("outputTokens", outputTokens),
checkRequired("totalTokens", totalTokens),
+ outputTokensDetails,
additionalProperties.toMutableMap(),
)
}
@@ -1168,6 +1216,7 @@ private constructor(
inputTokensDetails().validate()
outputTokens()
totalTokens()
+ outputTokensDetails().ifPresent { it.validate() }
validated = true
}
@@ -1190,7 +1239,8 @@ private constructor(
(if (inputTokens.asKnown().isPresent) 1 else 0) +
(inputTokensDetails.asKnown().getOrNull()?.validity() ?: 0) +
(if (outputTokens.asKnown().isPresent) 1 else 0) +
- (if (totalTokens.asKnown().isPresent) 1 else 0)
+ (if (totalTokens.asKnown().isPresent) 1 else 0) +
+ (outputTokensDetails.asKnown().getOrNull()?.validity() ?: 0)
/** The input tokens detailed information for the image generation. */
class InputTokensDetails
@@ -1410,6 +1460,224 @@ private constructor(
"InputTokensDetails{imageTokens=$imageTokens, textTokens=$textTokens, additionalProperties=$additionalProperties}"
}
+ /** The output token details for the image generation. */
+ class OutputTokensDetails
+ @JsonCreator(mode = JsonCreator.Mode.DISABLED)
+ private constructor(
+ private val imageTokens: JsonField<Long>,
+ private val textTokens: JsonField<Long>,
+ private val additionalProperties: MutableMap<String, JsonValue>,
+ ) {
+
+ @JsonCreator
+ private constructor(
+ @JsonProperty("image_tokens")
+ @ExcludeMissing
+ imageTokens: JsonField<Long> = JsonMissing.of(),
+ @JsonProperty("text_tokens")
+ @ExcludeMissing
+ textTokens: JsonField<Long> = JsonMissing.of(),
+ ) : this(imageTokens, textTokens, mutableMapOf())
+
+ /**
+ * The number of image output tokens generated by the model.
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type or is
+ * unexpectedly missing or null (e.g. if the server responded with an unexpected
+ * value).
+ */
+ fun imageTokens(): Long = imageTokens.getRequired("image_tokens")
+
+ /**
+ * The number of text output tokens generated by the model.
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type or is
+ * unexpectedly missing or null (e.g. if the server responded with an unexpected
+ * value).
+ */
+ fun textTokens(): Long = textTokens.getRequired("text_tokens")
+
+ /**
+ * Returns the raw JSON value of [imageTokens].
+ *
+ * Unlike [imageTokens], this method doesn't throw if the JSON field has an unexpected
+ * type.
+ */
+ @JsonProperty("image_tokens")
+ @ExcludeMissing
+ fun _imageTokens(): JsonField<Long> = imageTokens
+
+ /**
+ * Returns the raw JSON value of [textTokens].
+ *
+ * Unlike [textTokens], this method doesn't throw if the JSON field has an unexpected
+ * type.
+ */
+ @JsonProperty("text_tokens")
+ @ExcludeMissing
+ fun _textTokens(): JsonField<Long> = textTokens
+
+ @JsonAnySetter
+ private fun putAdditionalProperty(key: String, value: JsonValue) {
+ additionalProperties.put(key, value)
+ }
+
+ @JsonAnyGetter
+ @ExcludeMissing
+ fun _additionalProperties(): Map<String, JsonValue> =
+ Collections.unmodifiableMap(additionalProperties)
+
+ fun toBuilder() = Builder().from(this)
+
+ companion object {
+
+ /**
+ * Returns a mutable builder for constructing an instance of [OutputTokensDetails].
+ *
+ * The following fields are required:
+ * ```java
+ * .imageTokens()
+ * .textTokens()
+ * ```
+ */
+ @JvmStatic fun builder() = Builder()
+ }
+
+ /** A builder for [OutputTokensDetails]. */
+ class Builder internal constructor() {
+
+ private var imageTokens: JsonField<Long>? = null
+ private var textTokens: JsonField<Long>? = null
+ private var additionalProperties: MutableMap<String, JsonValue> = mutableMapOf()
+
+ @JvmSynthetic
+ internal fun from(outputTokensDetails: OutputTokensDetails) = apply {
+ imageTokens = outputTokensDetails.imageTokens
+ textTokens = outputTokensDetails.textTokens
+ additionalProperties = outputTokensDetails.additionalProperties.toMutableMap()
+ }
+
+ /** The number of image output tokens generated by the model. */
+ fun imageTokens(imageTokens: Long) = imageTokens(JsonField.of(imageTokens))
+
+ /**
+ * Sets [Builder.imageTokens] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.imageTokens] with a well-typed [Long] value
+ * instead. This method is primarily for setting the field to an undocumented or not
+ * yet supported value.
+ */
+ fun imageTokens(imageTokens: JsonField<Long>) = apply {
+ this.imageTokens = imageTokens
+ }
+
+ /** The number of text output tokens generated by the model. */
+ fun textTokens(textTokens: Long) = textTokens(JsonField.of(textTokens))
+
+ /**
+ * Sets [Builder.textTokens] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.textTokens] with a well-typed [Long] value
+ * instead. This method is primarily for setting the field to an undocumented or not
+ * yet supported value.
+ */
+ fun textTokens(textTokens: JsonField<Long>) = apply { this.textTokens = textTokens }
+
+ fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
+ this.additionalProperties.clear()
+ putAllAdditionalProperties(additionalProperties)
+ }
+
+ fun putAdditionalProperty(key: String, value: JsonValue) = apply {
+ additionalProperties.put(key, value)
+ }
+
+ fun putAllAdditionalProperties(additionalProperties: Map<String, JsonValue>) =
+ apply {
+ this.additionalProperties.putAll(additionalProperties)
+ }
+
+ fun removeAdditionalProperty(key: String) = apply {
+ additionalProperties.remove(key)
+ }
+
+ fun removeAllAdditionalProperties(keys: Set<String>) = apply {
+ keys.forEach(::removeAdditionalProperty)
+ }
+
+ /**
+ * Returns an immutable instance of [OutputTokensDetails].
+ *
+ * Further updates to this [Builder] will not mutate the returned instance.
+ *
+ * The following fields are required:
+ * ```java
+ * .imageTokens()
+ * .textTokens()
+ * ```
+ *
+ * @throws IllegalStateException if any required field is unset.
+ */
+ fun build(): OutputTokensDetails =
+ OutputTokensDetails(
+ checkRequired("imageTokens", imageTokens),
+ checkRequired("textTokens", textTokens),
+ additionalProperties.toMutableMap(),
+ )
+ }
+
+ private var validated: Boolean = false
+
+ fun validate(): OutputTokensDetails = apply {
+ if (validated) {
+ return@apply
+ }
+
+ imageTokens()
+ textTokens()
+ validated = true
+ }
+
+ fun isValid(): Boolean =
+ try {
+ validate()
+ true
+ } catch (e: OpenAIInvalidDataException) {
+ false
+ }
+
+ /**
+ * Returns a score indicating how many valid values are contained in this object
+ * recursively.
+ *
+ * Used for best match union deserialization.
+ */
+ @JvmSynthetic
+ internal fun validity(): Int =
+ (if (imageTokens.asKnown().isPresent) 1 else 0) +
+ (if (textTokens.asKnown().isPresent) 1 else 0)
+
+ override fun equals(other: Any?): Boolean {
+ if (this === other) {
+ return true
+ }
+
+ return other is OutputTokensDetails &&
+ imageTokens == other.imageTokens &&
+ textTokens == other.textTokens &&
+ additionalProperties == other.additionalProperties
+ }
+
+ private val hashCode: Int by lazy {
+ Objects.hash(imageTokens, textTokens, additionalProperties)
+ }
+
+ override fun hashCode(): Int = hashCode
+
+ override fun toString() =
+ "OutputTokensDetails{imageTokens=$imageTokens, textTokens=$textTokens, additionalProperties=$additionalProperties}"
+ }
+
override fun equals(other: Any?): Boolean {
if (this === other) {
return true
@@ -1420,6 +1688,7 @@ private constructor(
inputTokensDetails == other.inputTokensDetails &&
outputTokens == other.outputTokens &&
totalTokens == other.totalTokens &&
+ outputTokensDetails == other.outputTokensDetails &&
additionalProperties == other.additionalProperties
}
@@ -1429,6 +1698,7 @@ private constructor(
inputTokensDetails,
outputTokens,
totalTokens,
+ outputTokensDetails,
additionalProperties,
)
}
@@ -1436,7 +1706,7 @@ private constructor(
override fun hashCode(): Int = hashCode
override fun toString() =
- "Usage{inputTokens=$inputTokens, inputTokensDetails=$inputTokensDetails, outputTokens=$outputTokens, totalTokens=$totalTokens, additionalProperties=$additionalProperties}"
+ "Usage{inputTokens=$inputTokens, inputTokensDetails=$inputTokensDetails, outputTokens=$outputTokens, totalTokens=$totalTokens, outputTokensDetails=$outputTokensDetails, additionalProperties=$additionalProperties}"
}
override fun equals(other: Any?): Boolean {
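
A short sketch of reading the new `output_tokens_details` block from a response; it assumes the server populated `usage`, which remains optional on `ImagesResponse`:

```java
import com.openai.models.images.ImagesResponse;

final class ImageUsageLogger {

    static void logOutputTokens(ImagesResponse response) {
        response.usage()
                .flatMap(ImagesResponse.Usage::outputTokensDetails)
                .ifPresent(details ->
                        // image_tokens and text_tokens are both required within output_tokens_details.
                        System.out.println(
                                "image tokens: " + details.imageTokens()
                                        + ", text tokens: " + details.textTokens()));
    }
}
```
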
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/realtime/AudioTranscription.kt b/openai-java-core/src/main/kotlin/com/openai/models/realtime/AudioTranscription.kt
index cc8da45a..7e6dfe95 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/realtime/AudioTranscription.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/realtime/AudioTranscription.kt
@@ -15,7 +15,6 @@ import com.openai.errors.OpenAIInvalidDataException
import java.util.Collections
import java.util.Objects
import java.util.Optional
-import kotlin.jvm.optionals.getOrNull
class AudioTranscription
@JsonCreator(mode = JsonCreator.Mode.DISABLED)
@@ -45,8 +44,9 @@ private constructor(
/**
* The model to use for transcription. Current options are `whisper-1`,
- * `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`. Use
- * `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
+ * `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `gpt-4o-transcribe`, and
+ * `gpt-4o-transcribe-diarize`. Use `gpt-4o-transcribe-diarize` when you need diarization with
+ * speaker labels.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -137,8 +137,9 @@ private constructor(
/**
* The model to use for transcription. Current options are `whisper-1`,
- * `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`. Use
- * `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
+ * `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `gpt-4o-transcribe`, and
+ * `gpt-4o-transcribe-diarize`. Use `gpt-4o-transcribe-diarize` when you need diarization
+ * with speaker labels.
*/
fun model(model: Model) = model(JsonField.of(model))
@@ -150,6 +151,14 @@ private constructor(
*/
fun model(model: JsonField<Model>) = apply { this.model = model }
+ /**
+ * Sets [model] to an arbitrary [String].
+ *
+ * You should usually call [model] with a well-typed [Model] constant instead. This method
+ * is primarily for setting the field to an undocumented or not yet supported value.
+ */
+ fun model(value: String) = model(Model.of(value))
+
/**
* An optional text to guide the model's style or continue a previous audio segment. For
* `whisper-1`, the
@@ -203,7 +212,7 @@ private constructor(
}
language()
- model().ifPresent { it.validate() }
+ model()
prompt()
validated = true
}
@@ -224,13 +233,14 @@ private constructor(
@JvmSynthetic
internal fun validity(): Int =
(if (language.asKnown().isPresent) 1 else 0) +
- (model.asKnown().getOrNull()?.validity() ?: 0) +
+ (if (model.asKnown().isPresent) 1 else 0) +
(if (prompt.asKnown().isPresent) 1 else 0)
/**
* The model to use for transcription. Current options are `whisper-1`,
- * `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`. Use
- * `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
+ * `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `gpt-4o-transcribe`, and
+ * `gpt-4o-transcribe-diarize`. Use `gpt-4o-transcribe-diarize` when you need diarization with
+ * speaker labels.
*/
class Model @JsonCreator private constructor(private val value: JsonField<String>) : Enum {
@@ -250,6 +260,9 @@ private constructor(
@JvmField val GPT_4O_MINI_TRANSCRIBE = of("gpt-4o-mini-transcribe")
+ @JvmField
+ val GPT_4O_MINI_TRANSCRIBE_2025_12_15 = of("gpt-4o-mini-transcribe-2025-12-15")
+
@JvmField val GPT_4O_TRANSCRIBE = of("gpt-4o-transcribe")
@JvmField val GPT_4O_TRANSCRIBE_DIARIZE = of("gpt-4o-transcribe-diarize")
@@ -261,6 +274,7 @@ private constructor(
enum class Known {
WHISPER_1,
GPT_4O_MINI_TRANSCRIBE,
+ GPT_4O_MINI_TRANSCRIBE_2025_12_15,
GPT_4O_TRANSCRIBE,
GPT_4O_TRANSCRIBE_DIARIZE,
}
@@ -277,6 +291,7 @@ private constructor(
enum class Value {
WHISPER_1,
GPT_4O_MINI_TRANSCRIBE,
+ GPT_4O_MINI_TRANSCRIBE_2025_12_15,
GPT_4O_TRANSCRIBE,
GPT_4O_TRANSCRIBE_DIARIZE,
/** An enum member indicating that [Model] was instantiated with an unknown value. */
@@ -294,6 +309,7 @@ private constructor(
when (this) {
WHISPER_1 -> Value.WHISPER_1
GPT_4O_MINI_TRANSCRIBE -> Value.GPT_4O_MINI_TRANSCRIBE
+ GPT_4O_MINI_TRANSCRIBE_2025_12_15 -> Value.GPT_4O_MINI_TRANSCRIBE_2025_12_15
GPT_4O_TRANSCRIBE -> Value.GPT_4O_TRANSCRIBE
GPT_4O_TRANSCRIBE_DIARIZE -> Value.GPT_4O_TRANSCRIBE_DIARIZE
else -> Value._UNKNOWN
@@ -312,6 +328,7 @@ private constructor(
when (this) {
WHISPER_1 -> Known.WHISPER_1
GPT_4O_MINI_TRANSCRIBE -> Known.GPT_4O_MINI_TRANSCRIBE
+ GPT_4O_MINI_TRANSCRIBE_2025_12_15 -> Known.GPT_4O_MINI_TRANSCRIBE_2025_12_15
GPT_4O_TRANSCRIBE -> Known.GPT_4O_TRANSCRIBE
GPT_4O_TRANSCRIBE_DIARIZE -> Known.GPT_4O_TRANSCRIBE_DIARIZE
else -> throw OpenAIInvalidDataException("Unknown Model: $value")
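
For the new dated transcription slug, a minimal sketch of building the realtime transcription config; the two forms below should be equivalent, the second relying on the `model(String)` overload added in this change:

```java
import com.openai.models.realtime.AudioTranscription;

final class TranscriptionConfig {

    static AudioTranscription withConstant() {
        return AudioTranscription.builder()
                .model(AudioTranscription.Model.GPT_4O_MINI_TRANSCRIBE_2025_12_15)
                .build();
    }

    static AudioTranscription withRawSlug() {
        // New overload: model(String) forwards to Model.of(value).
        return AudioTranscription.builder()
                .model("gpt-4o-mini-transcribe-2025-12-15")
                .build();
    }
}
```
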
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeSession.kt b/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeSession.kt
index 88bc0c56..a788eb69 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeSession.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeSession.kt
@@ -788,6 +788,14 @@ private constructor(
*/
fun model(model: JsonField<Model>) = apply { this.model = model }
+ /**
+ * Sets [model] to an arbitrary [String].
+ *
+ * You should usually call [model] with a well-typed [Model] constant instead. This method
+ * is primarily for setting the field to an undocumented or not yet supported value.
+ */
+ fun model(value: String) = model(Model.of(value))
+
/** The object type. Always `realtime.session`. */
fun object_(object_: Object) = object_(JsonField.of(object_))
@@ -1060,7 +1068,7 @@ private constructor(
instructions()
maxResponseOutputTokens().ifPresent { it.validate() }
modalities().ifPresent { it.forEach { it.validate() } }
- model().ifPresent { it.validate() }
+ model()
object_().ifPresent { it.validate() }
outputAudioFormat().ifPresent { it.validate() }
prompt().ifPresent { it.validate() }
@@ -1098,7 +1106,7 @@ private constructor(
(if (instructions.asKnown().isPresent) 1 else 0) +
(maxResponseOutputTokens.asKnown().getOrNull()?.validity() ?: 0) +
(modalities.asKnown().getOrNull()?.sumOf { it.validity().toInt() } ?: 0) +
- (model.asKnown().getOrNull()?.validity() ?: 0) +
+ (if (model.asKnown().isPresent) 1 else 0) +
(object_.asKnown().getOrNull()?.validity() ?: 0) +
(outputAudioFormat.asKnown().getOrNull()?.validity() ?: 0) +
(prompt.asKnown().getOrNull()?.validity() ?: 0) +
@@ -1880,10 +1888,14 @@ private constructor(
@JvmField val GPT_REALTIME_MINI_2025_10_06 = of("gpt-realtime-mini-2025-10-06")
+ @JvmField val GPT_REALTIME_MINI_2025_12_15 = of("gpt-realtime-mini-2025-12-15")
+
@JvmField val GPT_AUDIO_MINI = of("gpt-audio-mini")
@JvmField val GPT_AUDIO_MINI_2025_10_06 = of("gpt-audio-mini-2025-10-06")
+ @JvmField val GPT_AUDIO_MINI_2025_12_15 = of("gpt-audio-mini-2025-12-15")
+
@JvmStatic fun of(value: String) = Model(JsonField.of(value))
}
@@ -1899,8 +1911,10 @@ private constructor(
GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17,
GPT_REALTIME_MINI,
GPT_REALTIME_MINI_2025_10_06,
+ GPT_REALTIME_MINI_2025_12_15,
GPT_AUDIO_MINI,
GPT_AUDIO_MINI_2025_10_06,
+ GPT_AUDIO_MINI_2025_12_15,
}
/**
@@ -1923,8 +1937,10 @@ private constructor(
GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17,
GPT_REALTIME_MINI,
GPT_REALTIME_MINI_2025_10_06,
+ GPT_REALTIME_MINI_2025_12_15,
GPT_AUDIO_MINI,
GPT_AUDIO_MINI_2025_10_06,
+ GPT_AUDIO_MINI_2025_12_15,
/** An enum member indicating that [Model] was instantiated with an unknown value. */
_UNKNOWN,
}
@@ -1949,8 +1965,10 @@ private constructor(
Value.GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17
GPT_REALTIME_MINI -> Value.GPT_REALTIME_MINI
GPT_REALTIME_MINI_2025_10_06 -> Value.GPT_REALTIME_MINI_2025_10_06
+ GPT_REALTIME_MINI_2025_12_15 -> Value.GPT_REALTIME_MINI_2025_12_15
GPT_AUDIO_MINI -> Value.GPT_AUDIO_MINI
GPT_AUDIO_MINI_2025_10_06 -> Value.GPT_AUDIO_MINI_2025_10_06
+ GPT_AUDIO_MINI_2025_12_15 -> Value.GPT_AUDIO_MINI_2025_12_15
else -> Value._UNKNOWN
}
@@ -1976,8 +1994,10 @@ private constructor(
Known.GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17
GPT_REALTIME_MINI -> Known.GPT_REALTIME_MINI
GPT_REALTIME_MINI_2025_10_06 -> Known.GPT_REALTIME_MINI_2025_10_06
+ GPT_REALTIME_MINI_2025_12_15 -> Known.GPT_REALTIME_MINI_2025_12_15
GPT_AUDIO_MINI -> Known.GPT_AUDIO_MINI
GPT_AUDIO_MINI_2025_10_06 -> Known.GPT_AUDIO_MINI_2025_10_06
+ GPT_AUDIO_MINI_2025_12_15 -> Known.GPT_AUDIO_MINI_2025_12_15
else -> throw OpenAIInvalidDataException("Unknown Model: $value")
}
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeSessionCreateRequest.kt b/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeSessionCreateRequest.kt
index b6b530d8..8bc64abd 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeSessionCreateRequest.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeSessionCreateRequest.kt
@@ -1160,10 +1160,14 @@ private constructor(
@JvmField val GPT_REALTIME_MINI_2025_10_06 = of("gpt-realtime-mini-2025-10-06")
+ @JvmField val GPT_REALTIME_MINI_2025_12_15 = of("gpt-realtime-mini-2025-12-15")
+
@JvmField val GPT_AUDIO_MINI = of("gpt-audio-mini")
@JvmField val GPT_AUDIO_MINI_2025_10_06 = of("gpt-audio-mini-2025-10-06")
+ @JvmField val GPT_AUDIO_MINI_2025_12_15 = of("gpt-audio-mini-2025-12-15")
+
@JvmStatic fun of(value: String) = Model(JsonField.of(value))
}
@@ -1179,8 +1183,10 @@ private constructor(
GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17,
GPT_REALTIME_MINI,
GPT_REALTIME_MINI_2025_10_06,
+ GPT_REALTIME_MINI_2025_12_15,
GPT_AUDIO_MINI,
GPT_AUDIO_MINI_2025_10_06,
+ GPT_AUDIO_MINI_2025_12_15,
}
/**
@@ -1203,8 +1209,10 @@ private constructor(
GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17,
GPT_REALTIME_MINI,
GPT_REALTIME_MINI_2025_10_06,
+ GPT_REALTIME_MINI_2025_12_15,
GPT_AUDIO_MINI,
GPT_AUDIO_MINI_2025_10_06,
+ GPT_AUDIO_MINI_2025_12_15,
/** An enum member indicating that [Model] was instantiated with an unknown value. */
_UNKNOWN,
}
@@ -1229,8 +1237,10 @@ private constructor(
Value.GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17
GPT_REALTIME_MINI -> Value.GPT_REALTIME_MINI
GPT_REALTIME_MINI_2025_10_06 -> Value.GPT_REALTIME_MINI_2025_10_06
+ GPT_REALTIME_MINI_2025_12_15 -> Value.GPT_REALTIME_MINI_2025_12_15
GPT_AUDIO_MINI -> Value.GPT_AUDIO_MINI
GPT_AUDIO_MINI_2025_10_06 -> Value.GPT_AUDIO_MINI_2025_10_06
+ GPT_AUDIO_MINI_2025_12_15 -> Value.GPT_AUDIO_MINI_2025_12_15
else -> Value._UNKNOWN
}
@@ -1256,8 +1266,10 @@ private constructor(
Known.GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17
GPT_REALTIME_MINI -> Known.GPT_REALTIME_MINI
GPT_REALTIME_MINI_2025_10_06 -> Known.GPT_REALTIME_MINI_2025_10_06
+ GPT_REALTIME_MINI_2025_12_15 -> Known.GPT_REALTIME_MINI_2025_12_15
GPT_AUDIO_MINI -> Known.GPT_AUDIO_MINI
GPT_AUDIO_MINI_2025_10_06 -> Known.GPT_AUDIO_MINI_2025_10_06
+ GPT_AUDIO_MINI_2025_12_15 -> Known.GPT_AUDIO_MINI_2025_12_15
else -> throw OpenAIInvalidDataException("Unknown Model: $value")
}
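
The same dated mini slugs are exposed on the session-create request's open `Model` enum; as a sketch, either the generated constants or the `of(String)` factory can be used (building a full request is omitted here, since its other fields are outside this change):

```java
import com.openai.models.realtime.RealtimeSessionCreateRequest;

final class RealtimeModelSlugs {

    // Generated constant added in this release.
    static RealtimeSessionCreateRequest.Model realtimeMiniDated() {
        return RealtimeSessionCreateRequest.Model.GPT_REALTIME_MINI_2025_12_15;
    }

    // Open-enum factory; accepts any slug, including ones added after this SDK release.
    static RealtimeSessionCreateRequest.Model audioMiniDated() {
        return RealtimeSessionCreateRequest.Model.of("gpt-audio-mini-2025-12-15");
    }
}
```
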
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/realtime/clientsecrets/RealtimeSessionCreateResponse.kt b/openai-java-core/src/main/kotlin/com/openai/models/realtime/clientsecrets/RealtimeSessionCreateResponse.kt
index 181b2f86..f1d2b95b 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/realtime/clientsecrets/RealtimeSessionCreateResponse.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/realtime/clientsecrets/RealtimeSessionCreateResponse.kt
@@ -3571,10 +3571,14 @@ private constructor(
@JvmField val GPT_REALTIME_MINI_2025_10_06 = of("gpt-realtime-mini-2025-10-06")
+ @JvmField val GPT_REALTIME_MINI_2025_12_15 = of("gpt-realtime-mini-2025-12-15")
+
@JvmField val GPT_AUDIO_MINI = of("gpt-audio-mini")
@JvmField val GPT_AUDIO_MINI_2025_10_06 = of("gpt-audio-mini-2025-10-06")
+ @JvmField val GPT_AUDIO_MINI_2025_12_15 = of("gpt-audio-mini-2025-12-15")
+
@JvmStatic fun of(value: String) = Model(JsonField.of(value))
}
@@ -3590,8 +3594,10 @@ private constructor(
GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17,
GPT_REALTIME_MINI,
GPT_REALTIME_MINI_2025_10_06,
+ GPT_REALTIME_MINI_2025_12_15,
GPT_AUDIO_MINI,
GPT_AUDIO_MINI_2025_10_06,
+ GPT_AUDIO_MINI_2025_12_15,
}
/**
@@ -3614,8 +3620,10 @@ private constructor(
GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17,
GPT_REALTIME_MINI,
GPT_REALTIME_MINI_2025_10_06,
+ GPT_REALTIME_MINI_2025_12_15,
GPT_AUDIO_MINI,
GPT_AUDIO_MINI_2025_10_06,
+ GPT_AUDIO_MINI_2025_12_15,
/** An enum member indicating that [Model] was instantiated with an unknown value. */
_UNKNOWN,
}
@@ -3640,8 +3648,10 @@ private constructor(
Value.GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17
GPT_REALTIME_MINI -> Value.GPT_REALTIME_MINI
GPT_REALTIME_MINI_2025_10_06 -> Value.GPT_REALTIME_MINI_2025_10_06
+ GPT_REALTIME_MINI_2025_12_15 -> Value.GPT_REALTIME_MINI_2025_12_15
GPT_AUDIO_MINI -> Value.GPT_AUDIO_MINI
GPT_AUDIO_MINI_2025_10_06 -> Value.GPT_AUDIO_MINI_2025_10_06
+ GPT_AUDIO_MINI_2025_12_15 -> Value.GPT_AUDIO_MINI_2025_12_15
else -> Value._UNKNOWN
}
@@ -3667,8 +3677,10 @@ private constructor(
Known.GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17
GPT_REALTIME_MINI -> Known.GPT_REALTIME_MINI
GPT_REALTIME_MINI_2025_10_06 -> Known.GPT_REALTIME_MINI_2025_10_06
+ GPT_REALTIME_MINI_2025_12_15 -> Known.GPT_REALTIME_MINI_2025_12_15
GPT_AUDIO_MINI -> Known.GPT_AUDIO_MINI
GPT_AUDIO_MINI_2025_10_06 -> Known.GPT_AUDIO_MINI_2025_10_06
+ GPT_AUDIO_MINI_2025_12_15 -> Known.GPT_AUDIO_MINI_2025_12_15
else -> throw OpenAIInvalidDataException("Unknown Model: $value")
}
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/responses/Tool.kt b/openai-java-core/src/main/kotlin/com/openai/models/responses/Tool.kt
index b43d5333..0fcb0784 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/responses/Tool.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/responses/Tool.kt
@@ -84,7 +84,7 @@ private constructor(
/** A tool that runs Python code to help generate a response to a prompt. */
fun codeInterpreter(): Optional<CodeInterpreter> = Optional.ofNullable(codeInterpreter)
- /** A tool that generates images using a model like `gpt-image-1`. */
+ /** A tool that generates images using the GPT image models. */
fun imageGeneration(): Optional<ImageGeneration> = Optional.ofNullable(imageGeneration)
/** A tool that allows the model to execute shell commands in a local environment. */
@@ -165,7 +165,7 @@ private constructor(
/** A tool that runs Python code to help generate a response to a prompt. */
fun asCodeInterpreter(): CodeInterpreter = codeInterpreter.getOrThrow("codeInterpreter")
- /** A tool that generates images using a model like `gpt-image-1`. */
+ /** A tool that generates images using the GPT image models. */
fun asImageGeneration(): ImageGeneration = imageGeneration.getOrThrow("imageGeneration")
/** A tool that allows the model to execute shell commands in a local environment. */
@@ -421,7 +421,7 @@ private constructor(
fun ofCodeInterpreter(codeInterpreter: CodeInterpreter) =
Tool(codeInterpreter = codeInterpreter)
- /** A tool that generates images using a model like `gpt-image-1`. */
+ /** A tool that generates images using the GPT image models. */
@JvmStatic
fun ofImageGeneration(imageGeneration: ImageGeneration) =
Tool(imageGeneration = imageGeneration)
@@ -488,7 +488,7 @@ private constructor(
/** A tool that runs Python code to help generate a response to a prompt. */
fun visitCodeInterpreter(codeInterpreter: CodeInterpreter): T
- /** A tool that generates images using a model like `gpt-image-1`. */
+ /** A tool that generates images using the GPT image models. */
fun visitImageGeneration(imageGeneration: ImageGeneration): T
/** A tool that allows the model to execute shell commands in a local environment. */
@@ -3790,7 +3790,7 @@ private constructor(
"CodeInterpreter{container=$container, type=$type, additionalProperties=$additionalProperties}"
}
- /** A tool that generates images using a model like `gpt-image-1`. */
+ /** A tool that generates images using the GPT image models. */
class ImageGeneration
@JsonCreator(mode = JsonCreator.Mode.DISABLED)
private constructor(
@@ -4176,6 +4176,15 @@ private constructor(
*/
fun model(model: JsonField<Model>) = apply { this.model = model }
+ /**
+ * Sets [model] to an arbitrary [String].
+ *
+ * You should usually call [model] with a well-typed [Model] constant instead. This
+ * method is primarily for setting the field to an undocumented or not yet supported
+ * value.
+ */
+ fun model(value: String) = model(Model.of(value))
+
/** Moderation level for the generated image. Default: `auto`. */
fun moderation(moderation: Moderation) = moderation(JsonField.of(moderation))
@@ -4324,7 +4333,7 @@ private constructor(
background().ifPresent { it.validate() }
inputFidelity().ifPresent { it.validate() }
inputImageMask().ifPresent { it.validate() }
- model().ifPresent { it.validate() }
+ model()
moderation().ifPresent { it.validate() }
outputCompression()
outputFormat().ifPresent { it.validate() }
@@ -4354,7 +4363,7 @@ private constructor(
(background.asKnown().getOrNull()?.validity() ?: 0) +
(inputFidelity.asKnown().getOrNull()?.validity() ?: 0) +
(inputImageMask.asKnown().getOrNull()?.validity() ?: 0) +
- (model.asKnown().getOrNull()?.validity() ?: 0) +
+ (if (model.asKnown().isPresent) 1 else 0) +
(moderation.asKnown().getOrNull()?.validity() ?: 0) +
(if (outputCompression.asKnown().isPresent) 1 else 0) +
(outputFormat.asKnown().getOrNull()?.validity() ?: 0) +
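
In the Responses API, the image-generation tool's builder now also accepts a raw model string; a minimal sketch, assuming (as the generated code suggests) that the tool's other fields are optional:

```java
import com.openai.models.responses.Tool;

final class ImageGenerationToolConfig {

    static Tool withRawModelSlug() {
        return Tool.ofImageGeneration(
                Tool.ImageGeneration.builder()
                        // New overload: model(String) forwards to Model.of(value).
                        .model("gpt-image-1.5")
                        .build());
    }
}
```
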
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/videos/Video.kt b/openai-java-core/src/main/kotlin/com/openai/models/videos/Video.kt
index 486552d3..81790eed 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/videos/Video.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/videos/Video.kt
@@ -439,6 +439,14 @@ private constructor(
*/
fun model(model: JsonField<VideoModel>) = apply { this.model = model }
+ /**
+ * Sets [model] to an arbitrary [String].
+ *
+ * You should usually call [model] with a well-typed [VideoModel] constant instead. This
+ * method is primarily for setting the field to an undocumented or not yet supported value.
+ */
+ fun model(value: String) = model(VideoModel.of(value))
+
/**
* Sets the field to an arbitrary JSON value.
*
@@ -606,7 +614,7 @@ private constructor(
createdAt()
error().ifPresent { it.validate() }
expiresAt()
- model().validate()
+ model()
_object_().let {
if (it != JsonValue.from("video")) {
throw OpenAIInvalidDataException("'object_' is invalid, received $it")
@@ -641,7 +649,7 @@ private constructor(
(if (createdAt.asKnown().isPresent) 1 else 0) +
(error.asKnown().getOrNull()?.validity() ?: 0) +
(if (expiresAt.asKnown().isPresent) 1 else 0) +
- (model.asKnown().getOrNull()?.validity() ?: 0) +
+ (if (model.asKnown().isPresent) 1 else 0) +
object_.let { if (it == JsonValue.from("video")) 1 else 0 } +
(if (progress.asKnown().isPresent) 1 else 0) +
(if (prompt.asKnown().isPresent) 1 else 0) +
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/videos/VideoCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/videos/VideoCreateParams.kt
index 761924a0..a2de32d4 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/videos/VideoCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/videos/VideoCreateParams.kt
@@ -208,6 +208,14 @@ private constructor(
*/
fun model(model: MultipartField<VideoModel>) = apply { body.model(model) }
+ /**
+ * Sets [model] to an arbitrary [String].
+ *
+ * You should usually call [model] with a well-typed [VideoModel] constant instead. This
+ * method is primarily for setting the field to an undocumented or not yet supported value.
+ */
+ fun model(value: String) = apply { body.model(value) }
+
/** Clip duration in seconds (allowed values: 4, 8, 12). Defaults to 4 seconds. */
fun seconds(seconds: VideoSeconds) = apply { body.seconds(seconds) }
@@ -580,6 +588,15 @@ private constructor(
*/
fun model(model: MultipartField<VideoModel>) = apply { this.model = model }
+ /**
+ * Sets [model] to an arbitrary [String].
+ *
+ * You should usually call [model] with a well-typed [VideoModel] constant instead. This
+ * method is primarily for setting the field to an undocumented or not yet supported
+ * value.
+ */
+ fun model(value: String) = model(VideoModel.of(value))
+
/** Clip duration in seconds (allowed values: 4, 8, 12). Defaults to 4 seconds. */
fun seconds(seconds: VideoSeconds) = seconds(MultipartField.of(seconds))
@@ -658,7 +675,7 @@ private constructor(
prompt()
inputReference()
- model().ifPresent { it.validate() }
+ model()
seconds().ifPresent { it.validate() }
size().ifPresent { it.validate() }
validated = true
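
Video creation gets the same `model(String)` overload on both the top-level params builder and its body builder; a minimal sketch of a create call (the `sora-2` slug and prompt are illustrative, and `client.videos().create(...)` is the assumed service entry point):

```java
import com.openai.client.OpenAIClient;
import com.openai.client.okhttp.OpenAIOkHttpClient;
import com.openai.models.videos.Video;
import com.openai.models.videos.VideoCreateParams;

public final class VideoCreateExample {
    public static void main(String[] args) {
        OpenAIClient client = OpenAIOkHttpClient.fromEnv();

        VideoCreateParams params = VideoCreateParams.builder()
                .prompt("A calico cat playing a piano on stage")
                .model("sora-2") // new String overload; VideoModel constants still work
                .build();

        Video video = client.videos().create(params);
        System.out.println("video id: " + video.id());
    }
}
```
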
diff --git a/openai-java-core/src/test/kotlin/com/openai/models/images/ImageCreateVariationParamsTest.kt b/openai-java-core/src/test/kotlin/com/openai/models/images/ImageCreateVariationParamsTest.kt
index e1b5175f..46f18e27 100644
--- a/openai-java-core/src/test/kotlin/com/openai/models/images/ImageCreateVariationParamsTest.kt
+++ b/openai-java-core/src/test/kotlin/com/openai/models/images/ImageCreateVariationParamsTest.kt
@@ -13,7 +13,7 @@ internal class ImageCreateVariationParamsTest {
fun create() {
ImageCreateVariationParams.builder()
.image("some content".byteInputStream())
- .model(ImageModel.DALL_E_2)
+ .model(ImageModel.GPT_IMAGE_1_5)
.n(1L)
.responseFormat(ImageCreateVariationParams.ResponseFormat.URL)
.size(ImageCreateVariationParams.Size._1024X1024)
@@ -26,7 +26,7 @@ internal class ImageCreateVariationParamsTest {
val params =
ImageCreateVariationParams.builder()
.image("some content".byteInputStream())
- .model(ImageModel.DALL_E_2)
+ .model(ImageModel.GPT_IMAGE_1_5)
.n(1L)
.responseFormat(ImageCreateVariationParams.ResponseFormat.URL)
.size(ImageCreateVariationParams.Size._1024X1024)
@@ -46,7 +46,7 @@ internal class ImageCreateVariationParamsTest {
.isEqualTo(
mapOf(
"image" to MultipartField.of("some content".byteInputStream()),
- "model" to MultipartField.of(ImageModel.DALL_E_2),
+ "model" to MultipartField.of(ImageModel.GPT_IMAGE_1_5),
"n" to MultipartField.of(1L),
"response_format" to
MultipartField.of(ImageCreateVariationParams.ResponseFormat.URL),
diff --git a/openai-java-core/src/test/kotlin/com/openai/models/images/ImageEditParamsTest.kt b/openai-java-core/src/test/kotlin/com/openai/models/images/ImageEditParamsTest.kt
index 687a15e0..5ec0f76b 100644
--- a/openai-java-core/src/test/kotlin/com/openai/models/images/ImageEditParamsTest.kt
+++ b/openai-java-core/src/test/kotlin/com/openai/models/images/ImageEditParamsTest.kt
@@ -17,7 +17,7 @@ internal class ImageEditParamsTest {
.background(ImageEditParams.Background.TRANSPARENT)
.inputFidelity(ImageEditParams.InputFidelity.HIGH)
.mask("some content".byteInputStream())
- .model(ImageModel.DALL_E_2)
+ .model(ImageModel.GPT_IMAGE_1_5)
.n(1L)
.outputCompression(100L)
.outputFormat(ImageEditParams.OutputFormat.PNG)
@@ -38,7 +38,7 @@ internal class ImageEditParamsTest {
.background(ImageEditParams.Background.TRANSPARENT)
.inputFidelity(ImageEditParams.InputFidelity.HIGH)
.mask("some content".byteInputStream())
- .model(ImageModel.DALL_E_2)
+ .model(ImageModel.GPT_IMAGE_1_5)
.n(1L)
.outputCompression(100L)
.outputFormat(ImageEditParams.OutputFormat.PNG)
@@ -74,7 +74,7 @@ internal class ImageEditParamsTest {
"background" to MultipartField.of(ImageEditParams.Background.TRANSPARENT),
"input_fidelity" to MultipartField.of(ImageEditParams.InputFidelity.HIGH),
"mask" to MultipartField.of("some content".byteInputStream()),
- "model" to MultipartField.of(ImageModel.DALL_E_2),
+ "model" to MultipartField.of(ImageModel.GPT_IMAGE_1_5),
"n" to MultipartField.of(1L),
"output_compression" to MultipartField.of(100L),
"output_format" to MultipartField.of(ImageEditParams.OutputFormat.PNG),
diff --git a/openai-java-core/src/test/kotlin/com/openai/models/images/ImageGenerateParamsTest.kt b/openai-java-core/src/test/kotlin/com/openai/models/images/ImageGenerateParamsTest.kt
index 8cd1386e..f8b4b632 100644
--- a/openai-java-core/src/test/kotlin/com/openai/models/images/ImageGenerateParamsTest.kt
+++ b/openai-java-core/src/test/kotlin/com/openai/models/images/ImageGenerateParamsTest.kt
@@ -12,7 +12,7 @@ internal class ImageGenerateParamsTest {
ImageGenerateParams.builder()
.prompt("A cute baby sea otter")
.background(ImageGenerateParams.Background.TRANSPARENT)
- .model(ImageModel.DALL_E_2)
+ .model(ImageModel.GPT_IMAGE_1_5)
.moderation(ImageGenerateParams.Moderation.LOW)
.n(1L)
.outputCompression(100L)
@@ -32,7 +32,7 @@ internal class ImageGenerateParamsTest {
ImageGenerateParams.builder()
.prompt("A cute baby sea otter")
.background(ImageGenerateParams.Background.TRANSPARENT)
- .model(ImageModel.DALL_E_2)
+ .model(ImageModel.GPT_IMAGE_1_5)
.moderation(ImageGenerateParams.Moderation.LOW)
.n(1L)
.outputCompression(100L)
@@ -49,7 +49,7 @@ internal class ImageGenerateParamsTest {
assertThat(body.prompt()).isEqualTo("A cute baby sea otter")
assertThat(body.background()).contains(ImageGenerateParams.Background.TRANSPARENT)
- assertThat(body.model()).contains(ImageModel.DALL_E_2)
+ assertThat(body.model()).contains(ImageModel.GPT_IMAGE_1_5)
assertThat(body.moderation()).contains(ImageGenerateParams.Moderation.LOW)
assertThat(body.n()).contains(1L)
assertThat(body.outputCompression()).contains(100L)
diff --git a/openai-java-core/src/test/kotlin/com/openai/models/images/ImagesResponseTest.kt b/openai-java-core/src/test/kotlin/com/openai/models/images/ImagesResponseTest.kt
index 4a3117f2..6d420c09 100644
--- a/openai-java-core/src/test/kotlin/com/openai/models/images/ImagesResponseTest.kt
+++ b/openai-java-core/src/test/kotlin/com/openai/models/images/ImagesResponseTest.kt
@@ -37,6 +37,12 @@ internal class ImagesResponseTest {
)
.outputTokens(0L)
.totalTokens(0L)
+ .outputTokensDetails(
+ ImagesResponse.Usage.OutputTokensDetails.builder()
+ .imageTokens(0L)
+ .textTokens(0L)
+ .build()
+ )
.build()
)
.build()
@@ -66,6 +72,12 @@ internal class ImagesResponseTest {
)
.outputTokens(0L)
.totalTokens(0L)
+ .outputTokensDetails(
+ ImagesResponse.Usage.OutputTokensDetails.builder()
+ .imageTokens(0L)
+ .textTokens(0L)
+ .build()
+ )
.build()
)
}
@@ -98,6 +110,12 @@ internal class ImagesResponseTest {
)
.outputTokens(0L)
.totalTokens(0L)
+ .outputTokensDetails(
+ ImagesResponse.Usage.OutputTokensDetails.builder()
+ .imageTokens(0L)
+ .textTokens(0L)
+ .build()
+ )
.build()
)
.build()
diff --git a/openai-java-core/src/test/kotlin/com/openai/services/async/ImageServiceAsyncTest.kt b/openai-java-core/src/test/kotlin/com/openai/services/async/ImageServiceAsyncTest.kt
index 22903460..0c71f5c4 100644
--- a/openai-java-core/src/test/kotlin/com/openai/services/async/ImageServiceAsyncTest.kt
+++ b/openai-java-core/src/test/kotlin/com/openai/services/async/ImageServiceAsyncTest.kt
@@ -27,7 +27,7 @@ internal class ImageServiceAsyncTest {
imageServiceAsync.createVariation(
ImageCreateVariationParams.builder()
.image("some content".byteInputStream())
- .model(ImageModel.DALL_E_2)
+ .model(ImageModel.GPT_IMAGE_1_5)
.n(1L)
.responseFormat(ImageCreateVariationParams.ResponseFormat.URL)
.size(ImageCreateVariationParams.Size._1024X1024)
@@ -56,7 +56,7 @@ internal class ImageServiceAsyncTest {
.background(ImageEditParams.Background.TRANSPARENT)
.inputFidelity(ImageEditParams.InputFidelity.HIGH)
.mask("some content".byteInputStream())
- .model(ImageModel.DALL_E_2)
+ .model(ImageModel.GPT_IMAGE_1_5)
.n(1L)
.outputCompression(100L)
.outputFormat(ImageEditParams.OutputFormat.PNG)
@@ -89,7 +89,7 @@ internal class ImageServiceAsyncTest {
.background(ImageEditParams.Background.TRANSPARENT)
.inputFidelity(ImageEditParams.InputFidelity.HIGH)
.mask("some content".byteInputStream())
- .model(ImageModel.DALL_E_2)
+ .model(ImageModel.GPT_IMAGE_1_5)
.n(1L)
.outputCompression(100L)
.outputFormat(ImageEditParams.OutputFormat.PNG)
@@ -122,7 +122,7 @@ internal class ImageServiceAsyncTest {
ImageGenerateParams.builder()
.prompt("A cute baby sea otter")
.background(ImageGenerateParams.Background.TRANSPARENT)
- .model(ImageModel.DALL_E_2)
+ .model(ImageModel.GPT_IMAGE_1_5)
.moderation(ImageGenerateParams.Moderation.LOW)
.n(1L)
.outputCompression(100L)
@@ -154,7 +154,7 @@ internal class ImageServiceAsyncTest {
ImageGenerateParams.builder()
.prompt("A cute baby sea otter")
.background(ImageGenerateParams.Background.TRANSPARENT)
- .model(ImageModel.DALL_E_2)
+ .model(ImageModel.GPT_IMAGE_1_5)
.moderation(ImageGenerateParams.Moderation.LOW)
.n(1L)
.outputCompression(100L)
diff --git a/openai-java-core/src/test/kotlin/com/openai/services/blocking/ImageServiceTest.kt b/openai-java-core/src/test/kotlin/com/openai/services/blocking/ImageServiceTest.kt
index c014b8ef..0c6b44d3 100644
--- a/openai-java-core/src/test/kotlin/com/openai/services/blocking/ImageServiceTest.kt
+++ b/openai-java-core/src/test/kotlin/com/openai/services/blocking/ImageServiceTest.kt
@@ -27,7 +27,7 @@ internal class ImageServiceTest {
imageService.createVariation(
ImageCreateVariationParams.builder()
.image("some content".byteInputStream())
- .model(ImageModel.DALL_E_2)
+ .model(ImageModel.GPT_IMAGE_1_5)
.n(1L)
.responseFormat(ImageCreateVariationParams.ResponseFormat.URL)
.size(ImageCreateVariationParams.Size._1024X1024)
@@ -55,7 +55,7 @@ internal class ImageServiceTest {
.background(ImageEditParams.Background.TRANSPARENT)
.inputFidelity(ImageEditParams.InputFidelity.HIGH)
.mask("some content".byteInputStream())
- .model(ImageModel.DALL_E_2)
+ .model(ImageModel.GPT_IMAGE_1_5)
.n(1L)
.outputCompression(100L)
.outputFormat(ImageEditParams.OutputFormat.PNG)
@@ -87,7 +87,7 @@ internal class ImageServiceTest {
.background(ImageEditParams.Background.TRANSPARENT)
.inputFidelity(ImageEditParams.InputFidelity.HIGH)
.mask("some content".byteInputStream())
- .model(ImageModel.DALL_E_2)
+ .model(ImageModel.GPT_IMAGE_1_5)
.n(1L)
.outputCompression(100L)
.outputFormat(ImageEditParams.OutputFormat.PNG)
@@ -120,7 +120,7 @@ internal class ImageServiceTest {
ImageGenerateParams.builder()
.prompt("A cute baby sea otter")
.background(ImageGenerateParams.Background.TRANSPARENT)
- .model(ImageModel.DALL_E_2)
+ .model(ImageModel.GPT_IMAGE_1_5)
.moderation(ImageGenerateParams.Moderation.LOW)
.n(1L)
.outputCompression(100L)
@@ -151,7 +151,7 @@ internal class ImageServiceTest {
ImageGenerateParams.builder()
.prompt("A cute baby sea otter")
.background(ImageGenerateParams.Background.TRANSPARENT)
- .model(ImageModel.DALL_E_2)
+ .model(ImageModel.GPT_IMAGE_1_5)
.moderation(ImageGenerateParams.Moderation.LOW)
.n(1L)
.outputCompression(100L)