diff --git a/examples/chat_history_librarian.py b/examples/chat_history_librarian.py index 206fd46b6..af870f272 100644 --- a/examples/chat_history_librarian.py +++ b/examples/chat_history_librarian.py @@ -27,7 +27,7 @@ class Librarian(OpenAICall): librarian.question = input("(User): ") response = librarian.call() librarian.history.append({"role": "user", "content": librarian.question}) - librarian.history.append({"role": "assistant", "content": response.content}) + librarian.history.append(response.message_param) print(f"(Assistant): {response.content}") # > (User): What fantasy book should I read? diff --git a/mirascope/anthropic/types.py b/mirascope/anthropic/types.py index 2813e92bf..ae7e12f1c 100644 --- a/mirascope/anthropic/types.py +++ b/mirascope/anthropic/types.py @@ -8,6 +8,7 @@ ContentBlockDeltaEvent, ContentBlockStartEvent, Message, + MessageParam, MessageStreamEvent, TextBlock, TextDelta, @@ -92,6 +93,11 @@ class BookRecommender(AnthropicCall): response_format: Optional[Literal["json"]] = None + @property + def message_param(self) -> MessageParam: + """Returns the assistant's response as a message parameter.""" + return self.response.model_dump(include={"content", "role"}) # type: ignore + @property def tools(self) -> Optional[list[AnthropicTool]]: """Returns the tools for the 0th choice message.""" diff --git a/mirascope/base/types.py b/mirascope/base/types.py index 668e02513..a77fa8d95 100644 --- a/mirascope/base/types.py +++ b/mirascope/base/types.py @@ -138,6 +138,11 @@ class BaseCallResponse(BaseModel, Generic[ResponseT, BaseToolT], ABC): model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True) + @property + def message_param(self) -> Any: + """Returns the assistant's response as a message parameter.""" + ... 
# pragma: no cover + @property @abstractmethod def tools(self) -> Optional[list[BaseToolT]]: diff --git a/mirascope/cohere/types.py b/mirascope/cohere/types.py index 4e235bdba..4b686b489 100644 --- a/mirascope/cohere/types.py +++ b/mirascope/cohere/types.py @@ -116,6 +116,15 @@ class BookRecommender(CohereCall): # We need to skip validation since it's a pydantic_v1 model and breaks validation. response: SkipValidation[NonStreamedChatResponse] + @property + def message_param(self) -> ChatMessage: + """Returns the assistant's response as a message parameter.""" + return ChatMessage( + message=self.response.text, + tool_calls=self.response.tool_calls, + role="assistant", # type: ignore + ) + @property def content(self) -> str: """Returns the content of the chat completion for the 0th choice.""" diff --git a/mirascope/gemini/types.py b/mirascope/gemini/types.py index 7952682d0..0dcd55041 100644 --- a/mirascope/gemini/types.py +++ b/mirascope/gemini/types.py @@ -4,6 +4,7 @@ from google.generativeai.types import ( # type: ignore AsyncGenerateContentResponse, + ContentDict, GenerateContentResponse, ) @@ -75,6 +76,11 @@ class BookRecommender(GeminiPrompt): ``` """ + @property + def message_param(self) -> ContentDict: + """Returns the model's response as a message parameter.""" + return {"role": "model", "parts": self.response.parts} + @property def tools(self) -> Optional[list[GeminiTool]]: """Returns the list of tools for the 0th candidate's 0th content part.""" diff --git a/mirascope/groq/types.py b/mirascope/groq/types.py index 0233834fe..796e18055 100644 --- a/mirascope/groq/types.py +++ b/mirascope/groq/types.py @@ -3,7 +3,7 @@ from typing import Any, Optional, Type, Union from groq._types import Body, Headers, Query -from groq.types.chat import ChatCompletion +from groq.types.chat import ChatCompletion, ChatCompletionAssistantMessageParam from groq.types.chat.chat_completion import ( ChatCompletionMessage, Choice, ) @@ -104,6 +104,11 @@ class
BookRecommender(GroqCall): response_format: Optional[ResponseFormat] = None + @property + def message_param(self) -> ChatCompletionAssistantMessageParam: + """Returns the assistant's response as a message parameter.""" + return self.message.model_dump(exclude={"function_call"}) # type: ignore + @property def choices(self) -> list[Choice]: """Returns the array of chat completion choices.""" diff --git a/mirascope/mistral/types.py b/mirascope/mistral/types.py index 0651f02c5..8342e033a 100644 --- a/mirascope/mistral/types.py +++ b/mirascope/mistral/types.py @@ -15,7 +15,7 @@ ) from pydantic import ConfigDict -from ..base import BaseCallParams, BaseCallResponse, BaseCallResponseChunk +from ..base import BaseCallParams, BaseCallResponse, BaseCallResponseChunk, Message from .tools import MistralTool @@ -68,6 +68,11 @@ class BookRecommender(MistralCall): """ + @property + def message_param(self) -> Message: + """Returns the assistant's response as a message parameter.""" + return self.message.model_dump() # type: ignore + @property def choices(self) -> list[ChatCompletionResponseChoice]: """Returns the array of chat completion choices.""" diff --git a/mirascope/openai/types.py b/mirascope/openai/types.py index 07673a5ea..7fa7e58a1 100644 --- a/mirascope/openai/types.py +++ b/mirascope/openai/types.py @@ -7,6 +7,7 @@ from openai.types import Embedding from openai.types.chat import ( ChatCompletion, + ChatCompletionAssistantMessageParam, ChatCompletionChunk, ChatCompletionMessageToolCall, ChatCompletionToolChoiceOptionParam, ) @@ -104,6 +105,11 @@ class BookRecommender(OpenAICall): response_format: Optional[ResponseFormat] = None + @property + def message_param(self) -> ChatCompletionAssistantMessageParam: + """Returns the assistant's response as a message parameter.""" + return self.message.model_dump(exclude={"function_call"}) # type: ignore + @property def choices(self) -> list[Choice]: """Returns the array of chat completion choices.""" diff --git
a/tests/anthropic/test_types.py b/tests/anthropic/test_types.py index 1fd7ea860..3adf26031 100644 --- a/tests/anthropic/test_types.py +++ b/tests/anthropic/test_types.py @@ -21,6 +21,10 @@ def test_anthropic_call_response(fixture_anthropic_message: Message): response=fixture_anthropic_message, start_time=0, end_time=1 ) assert response.content == "test" + assert response.message_param == { + "content": [{"text": "test", "type": "text"}], + "role": "assistant", + } assert response.tools is None assert response.tool is None assert response.usage is not None diff --git a/tests/cohere/test_types.py b/tests/cohere/test_types.py index dbd3df4e7..62767758e 100644 --- a/tests/cohere/test_types.py +++ b/tests/cohere/test_types.py @@ -4,6 +4,7 @@ from cohere.types import ( ChatCitation, ChatDocument, + ChatMessage, ChatSearchQuery, ChatSearchResult, NonStreamedChatResponse, @@ -42,6 +43,11 @@ def test_cohere_call_response_properties( response=fixture_non_streamed_response, start_time=0, end_time=0, cost=1 ) + assert isinstance(call_response.message_param, ChatMessage) + assert call_response.message_param == ChatMessage( + message="Test response", tool_calls=[fixture_tool_call], role="assistant" + ) # type: ignore + assert call_response.content == "Test response" assert call_response.search_queries == [fixture_chat_search_query] assert call_response.search_results == [fixture_chat_search_result] diff --git a/tests/gemini/test_types.py b/tests/gemini/test_types.py index 1d6d99074..9effea9ea 100644 --- a/tests/gemini/test_types.py +++ b/tests/gemini/test_types.py @@ -3,6 +3,7 @@ from typing import Type import pytest +from google.ai.generativelanguage_v1beta import Part from google.generativeai.types import GenerateContentResponse # type: ignore from mirascope.gemini.tools import GeminiTool @@ -19,6 +20,10 @@ def test_gemini_call_response( end_time=0, cost=None, ) + assert response.message_param == { + "role": "model", + "parts": [Part(text="Who is the author?")], + } assert 
response.content == "Who is the author?" assert response.tools is None assert response.tool is None diff --git a/tests/groq/test_types.py b/tests/groq/test_types.py index 37205562d..c6682afd7 100644 --- a/tests/groq/test_types.py +++ b/tests/groq/test_types.py @@ -22,6 +22,11 @@ def test_groq_call_response( start_time=0, end_time=0, ) + assert response.message_param == { + "role": "assistant", + "content": "test content", + "tool_calls": [], + } assert response.content == "test content" assert response.tools is None assert response.tool is None diff --git a/tests/mistral/test_types.py b/tests/mistral/test_types.py index aa6bdd098..addd76a38 100644 --- a/tests/mistral/test_types.py +++ b/tests/mistral/test_types.py @@ -24,6 +24,13 @@ def test_mistral_call_response( start_time=0, end_time=0, ) + assert response.message_param == { + "role": "assistant", + "content": "test content", + "name": None, + "tool_call_id": None, + "tool_calls": None, + } assert response.content == "test content" assert response.tools is None assert response.tool is None diff --git a/tests/openai/test_types.py b/tests/openai/test_types.py index 27c58ffdc..bb94c11af 100644 --- a/tests/openai/test_types.py +++ b/tests/openai/test_types.py @@ -28,6 +28,11 @@ def test_openai_call_response(fixture_chat_completion: ChatCompletion): response=fixture_chat_completion, start_time=0, end_time=0 ) choices = fixture_chat_completion.choices + assert response.message_param == { + "role": "assistant", + "content": choices[0].message.content, + "tool_calls": None, + } assert response.choices == choices assert response.choice == choices[0] assert response.message == choices[0].message