diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index c568d4f3..91ec14ae 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 9c8bd4d6bf675b159a80173b97c1265c + docChecksum: e0186c33d0269977e1790dfcc7d11aac docVersion: 1.0.0 speakeasyVersion: 1.517.3 generationVersion: 2.548.6 - releaseVersion: 1.8.2 - configChecksum: 5024c28578f991eabb85310ad8df96b7 + releaseVersion: 1.8.3 + configChecksum: 81d0549cb4d8bf7fd453dea94ea12376 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -193,6 +193,7 @@ generatedFiles: - docs/models/entries.md - docs/models/eventout.md - docs/models/file.md + - docs/models/filechunk.md - docs/models/filepurpose.md - docs/models/filesapiroutesdeletefilerequest.md - docs/models/filesapiroutesdownloadfilerequest.md @@ -212,7 +213,6 @@ generatedFiles: - docs/models/ftclassifierlossfunction.md - docs/models/ftmodelcapabilitiesout.md - docs/models/ftmodelcard.md - - docs/models/ftmodelcardtype.md - docs/models/function.md - docs/models/functioncall.md - docs/models/functioncallentry.md @@ -280,7 +280,6 @@ generatedFiles: - docs/models/messageinputentry.md - docs/models/messageinputentrycontent.md - docs/models/messageinputentryrole.md - - docs/models/messageinputentrytype.md - docs/models/messageoutputcontentchunks.md - docs/models/messageoutputentry.md - docs/models/messageoutputentrycontent.md @@ -506,6 +505,7 @@ generatedFiles: - src/mistralai/models/embeddingresponse.py - src/mistralai/models/embeddingresponsedata.py - src/mistralai/models/eventout.py + - src/mistralai/models/filechunk.py - src/mistralai/models/filepurpose.py - src/mistralai/models/files_api_routes_delete_fileop.py - src/mistralai/models/files_api_routes_download_fileop.py @@ -651,7 +651,7 @@ examples: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": true, "fine_tuning": false, "vision": false}, "max_context_length": 32768} + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": true, "fine_tuning": false, "vision": false, "classification": false}, "max_context_length": 32768} "422": {} delete_model_v1_models__model_id__delete: "": diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 77710816..51ac392f 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -15,7 +15,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.8.2 + version: 1.8.3 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index c618ac1d..f02293c6 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:21244d618cafcc163c3aa4acbc443ca16c63b8614632b65b87fbb2c4066987f3 - sourceBlobDigest: sha256:74aeb6a2e0d466c206f983ce79581cc72d205cc7866826282c181207ebe841a2 + sourceRevisionDigest: sha256:e9fd379cd22f75a10ccc5b866f4de98f973c3c0f77cb15a7bebcb94bf10c82f2 + sourceBlobDigest: sha256:4bb656d10d1cfbe09f9b1b7734c79f1855eb27184590362d3747116f6abf69d1 tags: - latest - - speakeasy-sdk-regen-1749573609 + - 
speakeasy-sdk-regen-1750953288 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +37,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:21244d618cafcc163c3aa4acbc443ca16c63b8614632b65b87fbb2c4066987f3 - sourceBlobDigest: sha256:74aeb6a2e0d466c206f983ce79581cc72d205cc7866826282c181207ebe841a2 + sourceRevisionDigest: sha256:e9fd379cd22f75a10ccc5b866f4de98f973c3c0f77cb15a7bebcb94bf10c82f2 + sourceBlobDigest: sha256:4bb656d10d1cfbe09f9b1b7734c79f1855eb27184590362d3747116f6abf69d1 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:dc4396ba994048a9f31c008dced1a46a9e54d89973e9608039a7bc37b1052957 + codeSamplesRevisionDigest: sha256:23aa878d903a7ef63dfbe3196f3b4d23836239ebd9481f4e1e094f53e21af410 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.517.3 diff --git a/RELEASES.md b/RELEASES.md index 265eda73..feaf62c2 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -238,4 +238,14 @@ Based on: ### Generated - [python v1.8.2] . ### Releases -- [PyPI v1.8.2] https://pypi.org/project/mistralai/1.8.2 - . \ No newline at end of file +- [PyPI v1.8.2] https://pypi.org/project/mistralai/1.8.2 - . + +## 2025-06-26 15:54:30 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.8.3] . +### Releases +- [PyPI v1.8.3] https://pypi.org/project/mistralai/1.8.3 - . \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsgetrequest.md b/docs/models/agentsapiv1conversationsgetrequest.md index 0d2d7827..67d450c8 100644 --- a/docs/models/agentsapiv1conversationsgetrequest.md +++ b/docs/models/agentsapiv1conversationsgetrequest.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching metadata. | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationshistoryrequest.md b/docs/models/agentsapiv1conversationshistoryrequest.md index f0d4f049..7e5d39e9 100644 --- a/docs/models/agentsapiv1conversationshistoryrequest.md +++ b/docs/models/agentsapiv1conversationshistoryrequest.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching entries. 
| \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsmessagesrequest.md b/docs/models/agentsapiv1conversationsmessagesrequest.md index b3189925..a91ab046 100644 --- a/docs/models/agentsapiv1conversationsmessagesrequest.md +++ b/docs/models/agentsapiv1conversationsmessagesrequest.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching messages. | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsrestartrequest.md b/docs/models/agentsapiv1conversationsrestartrequest.md index 11a2fe2e..a18a41f5 100644 --- a/docs/models/agentsapiv1conversationsrestartrequest.md +++ b/docs/models/agentsapiv1conversationsrestartrequest.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. | | `conversation_restart_request` | [models.ConversationRestartRequest](../models/conversationrestartrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsrestartstreamrequest.md b/docs/models/agentsapiv1conversationsrestartstreamrequest.md index 4cbb9d6c..7548286a 100644 --- a/docs/models/agentsapiv1conversationsrestartstreamrequest.md +++ b/docs/models/agentsapiv1conversationsrestartstreamrequest.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. | | `conversation_restart_stream_request` | [models.ConversationRestartStreamRequest](../models/conversationrestartstreamrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md index 8ace69d9..398e5f5c 100644 --- a/docs/models/agentscompletionrequest.md +++ b/docs/models/agentscompletionrequest.md @@ -19,4 +19,4 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | \ No newline at end of file diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md index 0bab012c..4e924cf0 100644 --- a/docs/models/agentscompletionstreamrequest.md +++ b/docs/models/agentscompletionstreamrequest.md @@ -19,4 +19,4 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | \ No newline at end of file diff --git a/docs/models/basemodelcard.md b/docs/models/basemodelcard.md index 0bdbb65f..84ad75c7 100644 --- a/docs/models/basemodelcard.md +++ b/docs/models/basemodelcard.md @@ -15,5 +15,6 @@ | `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | | `aliases` | List[*str*] | :heavy_minus_sign: | N/A | | `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `deprecation_replacement_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | *Optional[Literal["base"]]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md index a850b5b8..9c239961 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -21,5 +21,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md index cf286cda..d1e31a18 100644 --- a/docs/models/chatcompletionstreamrequest.md +++ b/docs/models/chatcompletionstreamrequest.md @@ -21,5 +21,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/models/contentchunk.md b/docs/models/contentchunk.md index 8cf7fad1..a65cd054 100644 --- a/docs/models/contentchunk.md +++ b/docs/models/contentchunk.md @@ -27,3 +27,9 @@ value: models.TextChunk = /* values here */ value: models.ReferenceChunk = /* values here */ ``` +### `models.FileChunk` + +```python +value: models.FileChunk = /* values here */ +``` + diff --git a/docs/models/document.md b/docs/models/document.md index e2940355..509d43b7 100644 --- a/docs/models/document.md +++ b/docs/models/document.md @@ -5,6 +5,12 @@ Document to run OCR on ## Supported Types +### `models.FileChunk` + +```python +value: models.FileChunk = /* values here */ +``` + ### `models.DocumentURLChunk` ```python diff --git a/docs/models/filechunk.md b/docs/models/filechunk.md new file mode 100644 index 00000000..46c838b0 --- /dev/null +++ b/docs/models/filechunk.md @@ -0,0 +1,9 @@ +# FileChunk + + +## Fields + +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `type` | *Optional[Literal["file"]]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/ftmodelcard.md b/docs/models/ftmodelcard.md index 1efeadb2..9ecab416 100644 --- a/docs/models/ftmodelcard.md +++ b/docs/models/ftmodelcard.md @@ -19,6 +19,7 @@ Extra fields for fine-tuned models. 
| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | | `aliases` | List[*str*] | :heavy_minus_sign: | N/A | | `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `deprecation_replacement_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.FTModelCardType]](../models/ftmodelcardtype.md) | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["fine-tuned"]]* | :heavy_minus_sign: | N/A | | `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/ftmodelcardtype.md b/docs/models/ftmodelcardtype.md deleted file mode 100644 index 0b38470b..00000000 --- a/docs/models/ftmodelcardtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# FTModelCardType - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `FINE_TUNED` | fine-tuned | \ No newline at end of file diff --git a/docs/models/inputentries.md b/docs/models/inputentries.md index e1e48279..b44a467d 100644 --- a/docs/models/inputentries.md +++ b/docs/models/inputentries.md @@ -9,9 +9,33 @@ value: models.MessageInputEntry = /* values here */ ``` +### `models.MessageOutputEntry` + +```python +value: models.MessageOutputEntry = /* values here */ +``` + ### `models.FunctionResultEntry` ```python value: models.FunctionResultEntry = /* values here */ ``` +### `models.FunctionCallEntry` + +```python +value: models.FunctionCallEntry = /* values here */ +``` + +### `models.ToolExecutionEntry` + +```python +value: models.ToolExecutionEntry = /* values here */ +``` + +### `models.AgentHandoffEntry` + +```python +value: models.AgentHandoffEntry = /* values here */ +``` + diff --git a/docs/models/messageinputentry.md b/docs/models/messageinputentry.md index a1573ed5..8fa55e47 100644 --- a/docs/models/messageinputentry.md +++ b/docs/models/messageinputentry.md @@ -5,12 +5,12 @@ Representation of an input message inside the conversation. 
## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `role` | [models.MessageInputEntryRole](../models/messageinputentryrole.md) | :heavy_check_mark: | N/A | -| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | -| `object` | [Optional[models.Object]](../models/object.md) | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.MessageInputEntryType]](../models/messageinputentrytype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `role` | [models.MessageInputEntryRole](../models/messageinputentryrole.md) | :heavy_check_mark: | N/A | +| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | +| `object` | [Optional[models.Object]](../models/object.md) | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/messageinputentrytype.md b/docs/models/messageinputentrytype.md deleted file mode 100644 index d3378124..00000000 --- a/docs/models/messageinputentrytype.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageInputEntryType - - -## Values - -| Name | Value | -| --------------- | --------------- | -| `MESSAGE_INPUT` | message.input | \ No newline at end of file diff --git a/docs/models/modelcapabilities.md b/docs/models/modelcapabilities.md index 2e399ab6..36b27938 100644 --- a/docs/models/modelcapabilities.md +++ b/docs/models/modelcapabilities.md @@ -9,4 +9,5 @@ | `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `vision` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `vision` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `classification` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/type.md b/docs/models/type.md index 239a00f5..357acf0b 100644 --- a/docs/models/type.md +++ b/docs/models/type.md @@ -3,6 +3,6 @@ ## Values -| Name | Value | -| ------ | ------ | -| `BASE` | base | \ No newline at end of file +| Name | Value | +| --------------- | --------------- | +| `MESSAGE_INPUT` | message.input 
| \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index c7fdb687..96353ddd 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -55,7 +55,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -116,7 +116,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 3a8d57fa..d9a85e63 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -57,7 +57,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | @@ -121,7 +121,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index 6d6aaa2c..7b11ccc3 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -135,7 +135,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching metadata. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -220,7 +220,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching entries. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -260,7 +260,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching messages. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -300,7 +300,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. | | `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | | `from_entry_id` | *str* | :heavy_check_mark: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | @@ -447,7 +447,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. | | `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | | `from_entry_id` | *str* | :heavy_check_mark: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index d7a5ed85..7dd5d1de 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -55,7 +55,7 @@ with Mistral( ## retrieve -Retrieve a model information. +Retrieve information about a model. ### Example Usage diff --git a/examples/ocr_process_from_file.py b/examples/ocr_process_from_file.py index 70c9d4a8..84a7b4d8 100644 --- a/examples/ocr_process_from_file.py +++ b/examples/ocr_process_from_file.py @@ -26,12 +26,9 @@ def main(): purpose="ocr", ) - signed_url = client.files.get_signed_url(file_id=uploaded_file.id, expiry=1) - pdf_response = client.ocr.process(document={ - "document_url": signed_url.url, - "type": "document_url", - "document_name": "mistral-7b-pdf", + "type": "file", + "file_id": uploaded_file.id, }, model="mistral-ocr-latest", include_image_base64=True) # Print the parsed PDF diff --git a/pyproject.toml b/pyproject.toml index c7cb9095..975b1435 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.8.2" +version = "1.8.3" description = "Python Client SDK for the Mistral AI API." 
authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index fc416fd3..3a6da97c 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.8.2" +__version__: str = "1.8.3" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.8.2 2.548.6 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.8.3 2.548.6 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index 4fbb25dd..0e7c7ae3 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -69,7 +69,7 @@ def complete( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -238,7 +238,7 @@ async def complete_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -409,7 +409,7 @@ def stream( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -586,7 +586,7 @@ async def stream_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 96fcf65d..41d4a9f2 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -148,7 +148,7 @@ def complete( :param n: Number of completions to return for each request, input tokens are only billed once. 
:param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -316,7 +316,7 @@ async def complete_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -494,7 +494,7 @@ def stream( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -680,7 +680,7 @@ async def stream_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py index 6ef02edd..6b5771f0 100644 --- a/src/mistralai/conversations.py +++ b/src/mistralai/conversations.py @@ -708,7 +708,7 @@ def get( Given a conversation_id retrieve a conversation entity with its attributes. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching metadata. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -810,7 +810,7 @@ async def get_async( Given a conversation_id retrieve a conversation entity with its attributes. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching metadata. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1170,7 +1170,7 @@ def get_history( Given a conversation_id retrieve all the entries belonging to that conversation. 
The entries are sorted in the order they were appended, those can be messages, connectors or function_call. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching entries. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1269,7 +1269,7 @@ async def get_history_async( Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching entries. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1368,7 +1368,7 @@ def get_messages( Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching messages. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1467,7 +1467,7 @@ async def get_messages_async( Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching messages. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1576,7 +1576,7 @@ def restart( Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. - :param conversation_id: + :param conversation_id: ID of the original conversation which is being restarted. :param inputs: :param from_entry_id: :param stream: @@ -1605,10 +1605,10 @@ def restart( stream=stream, store=store, handoff_execution=handoff_execution, - from_entry_id=from_entry_id, completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + from_entry_id=from_entry_id, ), ) @@ -1708,7 +1708,7 @@ async def restart_async( Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. - :param conversation_id: + :param conversation_id: ID of the original conversation which is being restarted. 
:param inputs: :param from_entry_id: :param stream: @@ -1737,10 +1737,10 @@ async def restart_async( stream=stream, store=store, handoff_execution=handoff_execution, - from_entry_id=from_entry_id, completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + from_entry_id=from_entry_id, ), ) @@ -2408,7 +2408,7 @@ def restart_stream( Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. - :param conversation_id: + :param conversation_id: ID of the original conversation which is being restarted. :param inputs: :param from_entry_id: :param stream: @@ -2437,10 +2437,10 @@ def restart_stream( stream=stream, store=store, handoff_execution=handoff_execution, - from_entry_id=from_entry_id, completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + from_entry_id=from_entry_id, ), ) @@ -2545,7 +2545,7 @@ async def restart_stream_async( Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. - :param conversation_id: + :param conversation_id: ID of the original conversation which is being restarted. :param inputs: :param from_entry_id: :param stream: @@ -2574,10 +2574,10 @@ async def restart_stream_async( stream=stream, store=store, handoff_execution=handoff_execution, - from_entry_id=from_entry_id, completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + from_entry_id=from_entry_id, ), ) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index e6493e90..712b48a5 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -119,7 +119,7 @@ AssistantMessageRole, AssistantMessageTypedDict, ) -from .basemodelcard import BaseModelCard, BaseModelCardTypedDict, Type +from .basemodelcard import BaseModelCard, BaseModelCardTypedDict from .batcherror import BatchError, BatchErrorTypedDict from .batchjobin import BatchJobIn, BatchJobInTypedDict from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict @@ -357,6 +357,7 @@ from .embeddingresponse import EmbeddingResponse, EmbeddingResponseTypedDict from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict from .eventout import EventOut, EventOutTypedDict +from .filechunk import FileChunk, FileChunkTypedDict from .filepurpose import FilePurpose from .files_api_routes_delete_fileop import ( FilesAPIRoutesDeleteFileRequest, @@ -405,7 +406,7 @@ FTModelCapabilitiesOut, FTModelCapabilitiesOutTypedDict, ) -from .ftmodelcard import FTModelCard, FTModelCardType, FTModelCardTypedDict +from .ftmodelcard import FTModelCard, FTModelCardTypedDict from .function import Function, FunctionTypedDict from .functioncall import ( Arguments, @@ -565,9 +566,9 @@ MessageInputEntryContent, MessageInputEntryContentTypedDict, MessageInputEntryRole, - MessageInputEntryType, MessageInputEntryTypedDict, Object, + Type, ) from .messageoutputcontentchunks import ( MessageOutputContentChunks, @@ -993,9 +994,10 @@ "FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelCard", - "FTModelCardType", "FTModelCardTypedDict", "File", + "FileChunk", + "FileChunkTypedDict", "FilePurpose", "FileSchema", "FileSchemaTypedDict", @@ -1134,7 +1136,6 @@ "MessageInputEntryContent", "MessageInputEntryContentTypedDict", "MessageInputEntryRole", - "MessageInputEntryType", 
"MessageInputEntryTypedDict", "MessageOutputContentChunks", "MessageOutputContentChunksTypedDict", diff --git a/src/mistralai/models/agents_api_v1_conversations_getop.py b/src/mistralai/models/agents_api_v1_conversations_getop.py index 4a800ad6..a37a61ba 100644 --- a/src/mistralai/models/agents_api_v1_conversations_getop.py +++ b/src/mistralai/models/agents_api_v1_conversations_getop.py @@ -11,12 +11,14 @@ class AgentsAPIV1ConversationsGetRequestTypedDict(TypedDict): conversation_id: str + r"""ID of the conversation from which we are fetching metadata.""" class AgentsAPIV1ConversationsGetRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] + r"""ID of the conversation from which we are fetching metadata.""" AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict = TypeAliasType( diff --git a/src/mistralai/models/agents_api_v1_conversations_historyop.py b/src/mistralai/models/agents_api_v1_conversations_historyop.py index 09fb6081..b8c33d1b 100644 --- a/src/mistralai/models/agents_api_v1_conversations_historyop.py +++ b/src/mistralai/models/agents_api_v1_conversations_historyop.py @@ -8,9 +8,11 @@ class AgentsAPIV1ConversationsHistoryRequestTypedDict(TypedDict): conversation_id: str + r"""ID of the conversation from which we are fetching entries.""" class AgentsAPIV1ConversationsHistoryRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] + r"""ID of the conversation from which we are fetching entries.""" diff --git a/src/mistralai/models/agents_api_v1_conversations_messagesop.py b/src/mistralai/models/agents_api_v1_conversations_messagesop.py index ade66e5e..f0dac8bf 100644 --- a/src/mistralai/models/agents_api_v1_conversations_messagesop.py +++ b/src/mistralai/models/agents_api_v1_conversations_messagesop.py @@ -8,9 +8,11 @@ class AgentsAPIV1ConversationsMessagesRequestTypedDict(TypedDict): conversation_id: str + r"""ID of the conversation from which we are fetching messages.""" class AgentsAPIV1ConversationsMessagesRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] + r"""ID of the conversation from which we are fetching messages.""" diff --git a/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py b/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py index c8fd8475..f39b74eb 100644 --- a/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py +++ b/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py @@ -12,6 +12,7 @@ class AgentsAPIV1ConversationsRestartStreamRequestTypedDict(TypedDict): conversation_id: str + r"""ID of the original conversation which is being restarted.""" conversation_restart_stream_request: ConversationRestartStreamRequestTypedDict @@ -19,6 +20,7 @@ class AgentsAPIV1ConversationsRestartStreamRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] + r"""ID of the original conversation which is being restarted.""" conversation_restart_stream_request: Annotated[ ConversationRestartStreamRequest, diff --git a/src/mistralai/models/agents_api_v1_conversations_restartop.py b/src/mistralai/models/agents_api_v1_conversations_restartop.py index aa867aff..f706c066 100644 --- a/src/mistralai/models/agents_api_v1_conversations_restartop.py +++ b/src/mistralai/models/agents_api_v1_conversations_restartop.py @@ -12,6 +12,7 @@ class 
AgentsAPIV1ConversationsRestartRequestTypedDict(TypedDict): conversation_id: str + r"""ID of the original conversation which is being restarted.""" conversation_restart_request: ConversationRestartRequestTypedDict @@ -19,6 +20,7 @@ class AgentsAPIV1ConversationsRestartRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] + r"""ID of the original conversation which is being restarted.""" conversation_restart_request: Annotated[ ConversationRestartRequest, diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py index e99dcfc2..99b33ca4 100644 --- a/src/mistralai/models/agentscompletionrequest.py +++ b/src/mistralai/models/agentscompletionrequest.py @@ -89,6 +89,7 @@ class AgentsCompletionRequestTypedDict(TypedDict): prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" class AgentsCompletionRequest(BaseModel): @@ -132,6 +133,7 @@ class AgentsCompletionRequest(BaseModel): prompt_mode: Annotated[ OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py index b4b423f5..8a8cc81c 100644 --- a/src/mistralai/models/agentscompletionstreamrequest.py +++ b/src/mistralai/models/agentscompletionstreamrequest.py @@ -88,6 +88,7 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict): prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" class AgentsCompletionStreamRequest(BaseModel): @@ -130,6 +131,7 @@ class AgentsCompletionStreamRequest(BaseModel): prompt_mode: Annotated[ OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/models/basemodelcard.py b/src/mistralai/models/basemodelcard.py index edb81741..ff4d2838 100644 --- a/src/mistralai/models/basemodelcard.py +++ b/src/mistralai/models/basemodelcard.py @@ -12,9 +12,6 @@ from typing_extensions import Annotated, NotRequired, TypedDict -Type = Literal["base"] - - class BaseModelCardTypedDict(TypedDict): id: str capabilities: ModelCapabilitiesTypedDict @@ -26,8 +23,9 @@ class BaseModelCardTypedDict(TypedDict): max_context_length: NotRequired[int] aliases: NotRequired[List[str]] deprecation: NotRequired[Nullable[datetime]] + deprecation_replacement_model: NotRequired[Nullable[str]] default_model_temperature: NotRequired[Nullable[float]] - type: Type + type: Literal["base"] class BaseModelCard(BaseModel): @@ -51,10 +49,12 @@ class BaseModelCard(BaseModel): deprecation: OptionalNullable[datetime] = UNSET + deprecation_replacement_model: OptionalNullable[str] = UNSET + default_model_temperature: OptionalNullable[float] = UNSET TYPE: Annotated[ - Annotated[Optional[Type], AfterValidator(validate_const("base"))], + Annotated[Optional[Literal["base"]], AfterValidator(validate_const("base"))], pydantic.Field(alias="type"), ] = "base" @@ -69,6 +69,7 @@ def serialize_model(self, handler): "max_context_length", "aliases", "deprecation", + "deprecation_replacement_model", "default_model_temperature", "type", ] @@ -76,6 +77,7 @@ def serialize_model(self, handler): "name", "description", "deprecation", + "deprecation_replacement_model", "default_model_temperature", ] null_default_fields = [] diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index 004cc011..286bd988 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ b/src/mistralai/models/chatcompletionrequest.py @@ -89,6 +89,7 @@ class ChatCompletionRequestTypedDict(TypedDict): prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -140,6 +141,7 @@ class ChatCompletionRequest(BaseModel): prompt_mode: Annotated[ OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py index 78a85bef..6516e4bf 100644 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -92,6 +92,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -142,6 +143,7 @@ class ChatCompletionStreamRequest(BaseModel): prompt_mode: Annotated[ OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" diff --git a/src/mistralai/models/contentchunk.py b/src/mistralai/models/contentchunk.py index ff7d9fcf..4cb8ab6d 100644 --- a/src/mistralai/models/contentchunk.py +++ b/src/mistralai/models/contentchunk.py @@ -2,6 +2,7 @@ from __future__ import annotations from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .filechunk import FileChunk, FileChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict @@ -17,6 +18,7 @@ TextChunkTypedDict, ImageURLChunkTypedDict, ReferenceChunkTypedDict, + FileChunkTypedDict, DocumentURLChunkTypedDict, ], ) @@ -28,6 +30,7 @@ Annotated[DocumentURLChunk, Tag("document_url")], Annotated[TextChunk, Tag("text")], Annotated[ReferenceChunk, Tag("reference")], + Annotated[FileChunk, Tag("file")], ], Discriminator(lambda m: get_discriminator(m, "type", "type")), ] diff --git a/src/mistralai/models/filechunk.py b/src/mistralai/models/filechunk.py new file mode 100644 index 00000000..83e60cef --- /dev/null +++ b/src/mistralai/models/filechunk.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class FileChunkTypedDict(TypedDict): + file_id: str + type: Literal["file"] + + +class FileChunk(BaseModel): + file_id: str + + TYPE: Annotated[ + Annotated[Optional[Literal["file"]], AfterValidator(validate_const("file"))], + pydantic.Field(alias="type"), + ] = "file" diff --git a/src/mistralai/models/ftmodelcard.py b/src/mistralai/models/ftmodelcard.py index 9a640a28..73cc418c 100644 --- a/src/mistralai/models/ftmodelcard.py +++ b/src/mistralai/models/ftmodelcard.py @@ -12,9 +12,6 @@ from typing_extensions import Annotated, NotRequired, TypedDict -FTModelCardType = Literal["fine-tuned"] - - class FTModelCardTypedDict(TypedDict): r"""Extra fields for fine-tuned models.""" @@ -30,8 +27,9 @@ class FTModelCardTypedDict(TypedDict): max_context_length: NotRequired[int] aliases: NotRequired[List[str]] deprecation: NotRequired[Nullable[datetime]] + deprecation_replacement_model: NotRequired[Nullable[str]] default_model_temperature: NotRequired[Nullable[float]] - type: FTModelCardType + type: Literal["fine-tuned"] archived: NotRequired[bool] @@ -62,11 +60,14 @@ class FTModelCard(BaseModel): deprecation: OptionalNullable[datetime] = UNSET + deprecation_replacement_model: OptionalNullable[str] = UNSET + default_model_temperature: OptionalNullable[float] = UNSET TYPE: Annotated[ Annotated[ - Optional[FTModelCardType], AfterValidator(validate_const("fine-tuned")) + Optional[Literal["fine-tuned"]], + AfterValidator(validate_const("fine-tuned")), ], pydantic.Field(alias="type"), ] = "fine-tuned" @@ -84,6 +85,7 @@ def serialize_model(self, handler): "max_context_length", "aliases", "deprecation", + "deprecation_replacement_model", "default_model_temperature", "type", "archived", @@ -92,6 +94,7 @@ def serialize_model(self, handler): "name", "description", "deprecation", + "deprecation_replacement_model", "default_model_temperature", ] null_default_fields = [] diff --git a/src/mistralai/models/inputentries.py b/src/mistralai/models/inputentries.py index 9c0fea6e..0221f968 100644 --- a/src/mistralai/models/inputentries.py +++ b/src/mistralai/models/inputentries.py @@ -1,18 +1,37 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict +from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict from typing import Union from typing_extensions import TypeAliasType InputEntriesTypedDict = TypeAliasType( "InputEntriesTypedDict", - Union[MessageInputEntryTypedDict, FunctionResultEntryTypedDict], + Union[ + MessageInputEntryTypedDict, + FunctionResultEntryTypedDict, + ToolExecutionEntryTypedDict, + FunctionCallEntryTypedDict, + MessageOutputEntryTypedDict, + AgentHandoffEntryTypedDict, + ], ) InputEntries = TypeAliasType( - "InputEntries", Union[MessageInputEntry, FunctionResultEntry] + "InputEntries", + Union[ + MessageInputEntry, + FunctionResultEntry, + ToolExecutionEntry, + FunctionCallEntry, + MessageOutputEntry, + AgentHandoffEntry, + ], ) diff --git a/src/mistralai/models/messageinputentry.py b/src/mistralai/models/messageinputentry.py index 3d642cdf..486fe733 100644 --- a/src/mistralai/models/messageinputentry.py +++ b/src/mistralai/models/messageinputentry.py @@ -14,7 +14,7 @@ Object = Literal["entry"] -MessageInputEntryType = Literal["message.input"] +Type = Literal["message.input"] MessageInputEntryRole = Literal["assistant", "user"] @@ -35,7 +35,7 @@ class MessageInputEntryTypedDict(TypedDict): role: MessageInputEntryRole content: MessageInputEntryContentTypedDict object: NotRequired[Object] - type: NotRequired[MessageInputEntryType] + type: NotRequired[Type] created_at: NotRequired[datetime] completed_at: NotRequired[Nullable[datetime]] id: NotRequired[str] @@ -50,7 +50,7 @@ class MessageInputEntry(BaseModel): object: Optional[Object] = "entry" - type: Optional[MessageInputEntryType] = "message.input" + type: Optional[Type] = "message.input" created_at: Optional[datetime] = None diff --git a/src/mistralai/models/modelcapabilities.py b/src/mistralai/models/modelcapabilities.py index 961f8664..54c5f2a2 100644 --- a/src/mistralai/models/modelcapabilities.py +++ b/src/mistralai/models/modelcapabilities.py @@ -12,6 +12,7 @@ class ModelCapabilitiesTypedDict(TypedDict): function_calling: NotRequired[bool] fine_tuning: NotRequired[bool] vision: NotRequired[bool] + classification: NotRequired[bool] class ModelCapabilities(BaseModel): @@ -24,3 +25,5 @@ class ModelCapabilities(BaseModel): fine_tuning: Optional[bool] = False vision: Optional[bool] = False + + classification: Optional[bool] = False diff --git a/src/mistralai/models/ocrrequest.py b/src/mistralai/models/ocrrequest.py index 4f9dfd47..40e67a1f 100644 --- a/src/mistralai/models/ocrrequest.py +++ b/src/mistralai/models/ocrrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .filechunk import FileChunk, FileChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL @@ -11,12 +12,13 @@ DocumentTypedDict = TypeAliasType( - "DocumentTypedDict", Union[ImageURLChunkTypedDict, DocumentURLChunkTypedDict] + "DocumentTypedDict", + 
Union[FileChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict], ) r"""Document to run OCR on""" -Document = TypeAliasType("Document", Union[ImageURLChunk, DocumentURLChunk]) +Document = TypeAliasType("Document", Union[FileChunk, ImageURLChunk, DocumentURLChunk]) r"""Document to run OCR on""" diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py index 96aab468..9790cb30 100644 --- a/src/mistralai/models_.py +++ b/src/mistralai/models_.py @@ -206,7 +206,7 @@ def retrieve( ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet: r"""Retrieve Model - Retrieve a model information. + Retrieve information about a model. :param model_id: The ID of the model to retrieve. :param retries: Override the default retry configuration for this method @@ -308,7 +308,7 @@ async def retrieve_async( ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet: r"""Retrieve Model - Retrieve a model information. + Retrieve information about a model. :param model_id: The ID of the model to retrieve. :param retries: Override the default retry configuration for this method
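
Usage sketch for the headline changes in this regen, adapted from the updated `examples/ocr_process_from_file.py` above. The local file name and the `MISTRAL_API_KEY` environment variable are assumptions, not part of the diff:

```python
import os

from mistralai import Mistral


def main():
    # Assumes MISTRAL_API_KEY is set in the environment.
    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

    # Upload a local PDF for OCR; "document.pdf" is a placeholder path.
    with open("document.pdf", "rb") as f:
        uploaded_file = client.files.upload(
            file={"file_name": "document.pdf", "content": f},
            purpose="ocr",
        )

    # New in 1.8.3: pass the uploaded file directly as a FileChunk
    # ({"type": "file", "file_id": ...}) instead of minting a signed URL first.
    pdf_response = client.ocr.process(
        document={"type": "file", "file_id": uploaded_file.id},
        model="mistral-ocr-latest",
        include_image_base64=True,
    )
    print(pdf_response)


if __name__ == "__main__":
    main()
```

The newly documented `prompt_mode` parameter is accepted by `chat.complete`, `chat.stream`, and the agents completion endpoints; per the added descriptions, setting it to `reasoning` selects the system prompt for reasoning models, while leaving it unset/null means no system prompt. A minimal sketch (the model name is an assumption, not taken from this diff):

```python
response = client.chat.complete(
    model="magistral-medium-latest",  # assumption: any reasoning-capable model
    messages=[{"role": "user", "content": "Explain the Monty Hall problem."}],
    prompt_mode="reasoning",  # "reasoning" applies the reasoning system prompt
)
```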