diff --git a/python/.cspell.json b/python/.cspell.json index a26cc7fed7..c5bf954d2b 100644 --- a/python/.cspell.json +++ b/python/.cspell.json @@ -24,6 +24,7 @@ ], "words": [ "aeiou", + "agentserver", "agui", "aiplatform", "azuredocindex", diff --git a/python/packages/core/agent_framework/_telemetry.py b/python/packages/core/agent_framework/_telemetry.py index a044fc9d02..8e59704b8a 100644 --- a/python/packages/core/agent_framework/_telemetry.py +++ b/python/packages/core/agent_framework/_telemetry.py @@ -26,6 +26,28 @@ HTTP_USER_AGENT: Final[str] = "agent-framework-python" AGENT_FRAMEWORK_USER_AGENT = f"{HTTP_USER_AGENT}/{version_info}" # type: ignore[has-type] +_user_agent_prefixes: list[str] = [] + + +def append_to_user_agent(prefix: str) -> None: + """Prepend a prefix to the agent framework user agent string. + + This is useful for hosting layers that want to identify themselves in telemetry. + Duplicate prefixes are ignored. + + Args: + prefix: The prefix to prepend (e.g. "foundry-hosting"). + """ + if prefix and prefix not in _user_agent_prefixes: + _user_agent_prefixes.append(prefix) + + +def _get_user_agent() -> str: + """Return the full user agent string including any prepended prefixes.""" + if not _user_agent_prefixes: + return AGENT_FRAMEWORK_USER_AGENT + return f"{'/'.join(_user_agent_prefixes)}/{AGENT_FRAMEWORK_USER_AGENT}" + def prepend_agent_framework_to_user_agent(headers: dict[str, Any] | None = None) -> dict[str, Any]: """Prepend "agent-framework" to the User-Agent in the headers. 
@@ -57,12 +79,9 @@ def prepend_agent_framework_to_user_agent(headers: dict[str, Any] | None = None) """ if not IS_TELEMETRY_ENABLED: return headers or {} + user_agent = _get_user_agent() if not headers: - return {USER_AGENT_KEY: AGENT_FRAMEWORK_USER_AGENT} - headers[USER_AGENT_KEY] = ( - f"{AGENT_FRAMEWORK_USER_AGENT} {headers[USER_AGENT_KEY]}" - if USER_AGENT_KEY in headers - else AGENT_FRAMEWORK_USER_AGENT - ) + return {USER_AGENT_KEY: user_agent} + headers[USER_AGENT_KEY] = f"{user_agent} {headers[USER_AGENT_KEY]}" if USER_AGENT_KEY in headers else user_agent return headers diff --git a/python/packages/foundry_hosting/LICENSE b/python/packages/foundry_hosting/LICENSE new file mode 100644 index 0000000000..9e841e7a26 --- /dev/null +++ b/python/packages/foundry_hosting/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/python/packages/foundry_hosting/README.md b/python/packages/foundry_hosting/README.md new file mode 100644 index 0000000000..9222c01aa8 --- /dev/null +++ b/python/packages/foundry_hosting/README.md @@ -0,0 +1,11 @@ +# Foundry Hosting + +This package provides the integration of Agent Framework agents and workflows with the Foundry Agent Server, which can be hosted on Foundry infrastructure. + +## Responses + +TODO + +## Invocations + +TODO diff --git a/python/packages/foundry_hosting/agent_framework_foundry_hosting/__init__.py b/python/packages/foundry_hosting/agent_framework_foundry_hosting/__init__.py new file mode 100644 index 0000000000..81e8430783 --- /dev/null +++ b/python/packages/foundry_hosting/agent_framework_foundry_hosting/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) Microsoft. All rights reserved. + +import importlib.metadata + +from ._invocations import InvocationsHostServer +from ._responses import ResponsesHostServer + +try: + __version__ = importlib.metadata.version(__name__) +except importlib.metadata.PackageNotFoundError: + __version__ = "0.0.0" + +__all__ = ["InvocationsHostServer", "ResponsesHostServer"] diff --git a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_invocations.py b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_invocations.py new file mode 100644 index 0000000000..08407874df --- /dev/null +++ b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_invocations.py @@ -0,0 +1,77 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +from agent_framework import AgentSession, BaseAgent, SupportsAgentRun +from agent_framework._telemetry import append_to_user_agent +from azure.ai.agentserver.invocations import InvocationAgentServerHost +from starlette.requests import Request +from starlette.responses import JSONResponse, Response, StreamingResponse +from typing_extensions import Any, AsyncGenerator, Optional + + +class InvocationsHostServer(InvocationAgentServerHost): +    """An invocations server host for an agent.""" + +    USER_AGENT_PREFIX = "foundry-hosting" + +    def __init__( +        self, +        agent: BaseAgent, +        *, +        stream: bool = False, +        openapi_spec: Optional[dict[str, Any]] = None, +        **kwargs: Any, +    ) -> None: +        """Initialize an InvocationsHostServer. + +        Args: +            agent: The agent to handle responses for. +            stream: Whether to stream the responses. Defaults to False. +            openapi_spec: The OpenAPI specification for the server. +            **kwargs: Additional keyword arguments. + +        This host will expect the request to be a JSON body with a "message" field. +        The response from the host will be a JSON object with a "response" field containing +        the agent's response and a "session_id" field containing the session ID.
+ """ + super().__init__(openapi_spec=openapi_spec, **kwargs) + + if not isinstance(agent, SupportsAgentRun): + raise TypeError("Agent must support the SupportsAgentRun interface") + + append_to_user_agent(self.USER_AGENT_PREFIX) + self._agent = agent + self._stream = stream + self._sessions: dict[str, AgentSession] = {} + self.invoke_handler(self._handle_invoke) # pyright: ignore[reportUnknownMemberType] + + async def _handle_invoke(self, request: Request) -> Response: + """Invoke the agent with the given request.""" + data = await request.json() + session_id: str = request.state.session_id + + user_message = data.get("message", None) + if user_message is None: + error = "Missing 'message' in request" + if self._stream: + return StreamingResponse(content=error, status_code=400) + return Response(content=error, status_code=400) + + session = self._sessions.setdefault(session_id, AgentSession(session_id=session_id)) + + if self._stream: + + async def stream_response() -> AsyncGenerator[str]: + async for update in self._agent.run(user_message, session=session, stream=True): + yield update.text + + return StreamingResponse( + stream_response(), + media_type="text/event-stream", + headers={"Cache-Control": "no-cache", "Connection": "keep-alive"}, + ) + + response = await self._agent.run([user_message], session=session, stream=self._stream) + return JSONResponse({ + "response": response.text, + "session_id": session_id, + }) diff --git a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py new file mode 100644 index 0000000000..94c710f5b8 --- /dev/null +++ b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py @@ -0,0 +1,304 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import asyncio +from collections.abc import AsyncIterable + +from agent_framework import Agent, ChatOptions, Content, HistoryProvider, Message +from agent_framework._telemetry import append_to_user_agent +from azure.ai.agentserver.responses import ( + ResponseContext, + ResponseEventStream, + ResponseProviderProtocol, + ResponsesServerOptions, +) +from azure.ai.agentserver.responses.hosting import ResponsesAgentServerHost +from azure.ai.agentserver.responses.models import ( + ComputerScreenshotContent, + CreateResponse, + FunctionCallOutputItemParam, + MessageContent, + MessageContentInputFileContent, + MessageContentInputImageContent, + MessageContentInputTextContent, + MessageContentOutputTextContent, + MessageContentReasoningTextContent, + MessageContentRefusalContent, + OutputItem, + OutputItemFunctionToolCall, + OutputItemMessage, + OutputItemOutputMessage, + OutputItemReasoningItem, + OutputMessageContent, + OutputMessageContentOutputTextContent, + OutputMessageContentRefusalContent, + SummaryTextContent, + TextContent, + get_input_text, +) +from typing_extensions import Any, Sequence, cast + + +class ResponsesHostServer(ResponsesAgentServerHost): + """A responses server host for an agent.""" + + USER_AGENT_PREFIX = "foundry-hosting" + + def __init__( + self, + agent: Agent, + *, + prefix: str = "", + options: ResponsesServerOptions | None = None, + provider: ResponseProviderProtocol | None = None, + **kwargs: Any, + ) -> None: + """Initialize a ResponsesHostServer. + + Args: + agent: The agent to handle responses for. + prefix: The URL prefix for the server. + options: Optional server options. + provider: Optional response provider. + **kwargs: Additional keyword arguments. + + Note: + The agent must not have a history provider with `load_messages=True`, + because history is managed by the hosting infrastructure. 
+ """ + super().__init__(prefix=prefix, options=options, provider=provider, **kwargs) + + self._validate_agent(agent) + self._agent = agent + self.create_handler(self._handle_create) # pyright: ignore[reportUnknownMemberType] + + # Append the user agent prefix for telemetry purposes + append_to_user_agent(self.USER_AGENT_PREFIX) + + def _validate_agent(self, agent: Agent) -> None: + """Validate the agent to ensure it does not have a history provider with `load_messages=True`. + + History is managed by the hosting infrastructure. + """ + for provider in agent.context_providers: + if isinstance(provider, HistoryProvider) and provider.load_messages: + raise RuntimeError( + "There shouldn't be a history provider with `load_messages=True` already present. " + "History is managed by the hosting infrastructure." + ) + + async def _handle_create( + self, + request: CreateResponse, + context: ResponseContext, + cancellation_signal: asyncio.Event, + ) -> AsyncIterable[dict[str, Any]]: + """Handle the creation of a response.""" + input_items = get_input_text(request) + history = await context.get_history() + messages = [*_to_messages(history), input_items] + + chat_options = _to_chat_options(request) + + stream = ResponseEventStream(response_id=context.response_id, model=request.model) + + yield stream.emit_created() + yield stream.emit_in_progress() + + # Add reasoning + + if request.stream is None or request.stream is False: + # Run the agent in non-streaming mode + response = await self._agent.run(messages, stream=False, options=chat_options) + for item in stream.output_item_message(response.text): + yield item + yield stream.emit_completed() + return + + # Start the streaming response + message_item = stream.add_output_item_message() + yield message_item.emit_added() + text_content = message_item.add_text_content() + yield text_content.emit_added() + + # Invoke the MAF agent + response_stream = self._agent.run(messages, stream=True, options=chat_options) + async for 
update in response_stream: + if update.text: + yield text_content.emit_delta(update.text) + + # Complete the message + final = await response_stream.get_final_response() + yield text_content.emit_done(final.text) + yield message_item.emit_content_done(text_content) + yield message_item.emit_done() + + yield stream.emit_completed() + + +# region Option Conversion + + +def _to_chat_options(request: CreateResponse) -> ChatOptions: + """Converts a CreateResponse request to ChatOptions. + + Args: + request (CreateResponse): The request to convert. + + Returns: + ChatOptions: The converted ChatOptions. + """ + chat_options = ChatOptions() + + if request.temperature is not None: + chat_options["temperature"] = request.temperature + if request.top_p is not None: + chat_options["top_p"] = request.top_p + if request.max_output_tokens is not None: + chat_options["max_tokens"] = request.max_output_tokens + if request.parallel_tool_calls is not None: + chat_options["allow_multiple_tool_calls"] = request.parallel_tool_calls + + return chat_options + + +# endregion + + +# region Message Conversion + + +def _to_messages(history: Sequence[OutputItem]) -> list[Message]: + """Converts a sequence of OutputItem objects to a list of Message objects. + + Args: + history (Sequence[OutputItem]): The sequence of OutputItem objects to convert. + + Returns: + list[Message]: The list of Message objects. + """ + messages: list[Message] = [] + for item in history: + messages.append(_to_message(item)) + return messages + + +def _to_message(item: OutputItem) -> Message: + """Converts an OutputItem to a Message. + + Args: + item (OutputItem): The OutputItem to convert. + + Returns: + Message: The converted Message. + + Raises: + ValueError: If the OutputItem type is not supported. 
+ """ + if item.type == "output_message": + msg = cast(OutputItemOutputMessage, item) + contents = [_convert_output_message_content(part) for part in msg.content] + return Message(role=msg.role, contents=contents) + + if item.type == "message": + msg = cast(OutputItemMessage, item) + contents = [_convert_message_content(part) for part in msg.content] + return Message(role=msg.role, contents=contents) + + if item.type == "function_call": + fc = cast(OutputItemFunctionToolCall, item) + return Message( + role="assistant", + contents=[Content.from_function_call(fc.call_id, fc.name, arguments=fc.arguments)], + ) + + if item.type == "function_call_output": + fco = cast(FunctionCallOutputItemParam, item) + output = fco.output if isinstance(fco.output, str) else str(fco.output) + return Message( + role="tool", + contents=[Content.from_function_result(fco.call_id, result=output)], + ) + + if item.type == "reasoning": + reasoning = cast(OutputItemReasoningItem, item) + contents: list[Content] = [] + if reasoning.summary: + for summary in reasoning.summary: + contents.append(Content.from_text(summary.text)) + return Message(role="assistant", contents=contents) + + raise ValueError(f"Unsupported OutputItem type: {item.type}") + + +def _convert_output_message_content(content: OutputMessageContent) -> Content: + """Converts an OutputMessageContent to a Content object. + + Args: + content (OutputMessageContent): The OutputMessageContent to convert. + + Returns: + Content: The converted Content object. + + Raises: + ValueError: If the OutputMessageContent type is not supported. 
+ """ + if content.type == "output_text": + text_content = cast(OutputMessageContentOutputTextContent, content) + return Content.from_text(text_content.text) + if content.type == "refusal": + refusal_content = cast(OutputMessageContentRefusalContent, content) + return Content.from_text(refusal_content.refusal) + + raise ValueError(f"Unsupported OutputMessageContent type: {content.type}") + + +def _convert_message_content(content: MessageContent) -> Content: + """Converts a MessageContent to a Content object. + + Args: + content (MessageContent): The MessageContent to convert. + + Returns: + Content: The converted Content object. + + Raises: + ValueError: If the MessageContent type is not supported. + """ + if content.type == "input_text": + input_text = cast(MessageContentInputTextContent, content) + return Content.from_text(input_text.text) + if content.type == "output_text": + output_text = cast(MessageContentOutputTextContent, content) + return Content.from_text(output_text.text) + if content.type == "text": + text = cast(TextContent, content) + return Content.from_text(text.text) + if content.type == "summary_text": + summary = cast(SummaryTextContent, content) + return Content.from_text(summary.text) + if content.type == "refusal": + refusal = cast(MessageContentRefusalContent, content) + return Content.from_text(refusal.refusal) + if content.type == "reasoning_text": + reasoning = cast(MessageContentReasoningTextContent, content) + return Content.from_text_reasoning(text=reasoning.text) + if content.type == "input_image": + image = cast(MessageContentInputImageContent, content) + if image.image_url: + return Content.from_uri(image.image_url) + if image.file_id: + return Content.from_hosted_file(image.file_id) + if content.type == "input_file": + file = cast(MessageContentInputFileContent, content) + if file.file_url: + return Content.from_uri(file.file_url) + if file.file_id: + return Content.from_hosted_file(file.file_id, name=file.filename) + if 
content.type == "computer_screenshot": +        screenshot = cast(ComputerScreenshotContent, content) +        return Content.from_uri(screenshot.image_url) + +    raise ValueError(f"Unsupported MessageContent type: {content.type}") + + +# endregion diff --git a/python/packages/foundry_hosting/pyproject.toml b/python/packages/foundry_hosting/pyproject.toml new file mode 100644 index 0000000000..55916ac076 --- /dev/null +++ b/python/packages/foundry_hosting/pyproject.toml @@ -0,0 +1,104 @@ +[project] +name = "agent-framework-foundry-hosting" +description = "Foundry Hosting integration for Microsoft Agent Framework." +authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] +readme = "README.md" +requires-python = ">=3.10" +version = "1.0.0a260402" +license-files = ["LICENSE"] +urls.homepage = "https://aka.ms/agent-framework" +urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" +urls.release_notes = "https://github.com/microsoft/agent-framework/releases?q=tag%3Apython-1&expanded=true" +urls.issues = "https://github.com/microsoft/agent-framework/issues" +classifiers = [ +    "License :: OSI Approved :: MIT License", +    "Development Status :: 3 - Alpha", +    "Intended Audience :: Developers", +    "Programming Language :: Python :: 3", +    "Programming Language :: Python :: 3.10", +    "Programming Language :: Python :: 3.11", +    "Programming Language :: Python :: 3.12", +    "Programming Language :: Python :: 3.13", +    "Programming Language :: Python :: 3.14", +    "Typing :: Typed", +] +dependencies = [ +    "agent-framework-core>=1.0.0,<2", +    "azure-ai-agentserver-core", +    "azure-ai-agentserver-responses", +    "azure-ai-agentserver-invocations" +] + +[tool.uv.sources] +azure-ai-agentserver-responses = { git = "https://github.com/Azure/azure-sdk-for-python.git", branch = "agentserver/responses", subdirectory = "sdk/agentserver/azure-ai-agentserver-responses" } +azure-ai-agentserver-invocations = { git = "https://github.com/Azure/azure-sdk-for-python.git", branch =
"agentserver/responses", subdirectory = "sdk/agentserver/azure-ai-agentserver-invocations" } +azure-ai-agentserver-core = { git = "https://github.com/Azure/azure-sdk-for-python.git", branch = "agentserver/responses", subdirectory = "sdk/agentserver/azure-ai-agentserver-core" } + +[tool.uv] +prerelease = "if-necessary-or-explicit" +environments = [ + "sys_platform == 'darwin'", + "sys_platform == 'linux'", + "sys_platform == 'win32'" +] + +[tool.uv-dynamic-versioning] +fallback-version = "0.0.0" + +[tool.pytest.ini_options] +testpaths = 'tests' +addopts = "-ra -q -r fEX" +asyncio_mode = "auto" +asyncio_default_fixture_loop_scope = "function" +filterwarnings = [] +timeout = 120 +markers = [ + "integration: marks tests as integration tests that require external services", +] + +[tool.ruff] +extend = "../../pyproject.toml" + +[tool.coverage.run] +omit = [ + "**/__init__.py" +] + +[tool.pyright] +extends = "../../pyproject.toml" +include = ["agent_framework_foundry_hosting"] +exclude = ['tests'] + +[tool.mypy] +plugins = ['pydantic.mypy'] +strict = true +python_version = "3.10" +ignore_missing_imports = true +disallow_untyped_defs = true +no_implicit_optional = true +check_untyped_defs = true +warn_return_any = true +show_error_codes = true +warn_unused_ignores = false +disallow_incomplete_defs = true +disallow_untyped_decorators = true + +[tool.bandit] +targets = ["agent_framework_foundry_hosting"] +exclude_dirs = ["tests"] + +[tool.poe] +executor.type = "uv" +include = "../../shared_tasks.toml" + +[tool.poe.tasks.mypy] +help = "Run MyPy for this package." +cmd = "mypy --config-file $POE_ROOT/pyproject.toml agent_framework_foundry_hosting" + +[tool.poe.tasks.test] +help = "Run the default unit test suite for this package." 
+cmd = 'pytest -m "not integration" --cov=agent_framework_foundry_hosting --cov-report=term-missing:skip-covered tests' + +[build-system] +requires = ["flit-core >= 3.11,<4.0"] +build-backend = "flit_core.buildapi" \ No newline at end of file diff --git a/python/pyproject.toml b/python/pyproject.toml index 24af13b940..9e677fb602 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -78,6 +78,7 @@ agent-framework-declarative = { workspace = true } agent-framework-devui = { workspace = true } agent-framework-durabletask = { workspace = true } agent-framework-foundry = { workspace = true } +agent-framework-foundry-hosting = { workspace = true } agent-framework-foundry-local = { workspace = true } agent-framework-lab = { workspace = true } agent-framework-mem0 = { workspace = true } diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/README.md b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/README.md new file mode 100644 index 0000000000..040f562f64 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/README.md @@ -0,0 +1,13 @@ +# Basic example of hosting an agent with the `invocations` API + +Run the following command to start the server: + +```bash +python main.py +``` + +Send a POST request to the server with a JSON body containing a "message" field to interact with the agent. For example: + +```bash +curl -X POST http://localhost:8088/invocations -H "Content-Type: application/json" -d '{"message": "Hi!"}' +``` diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/main.py new file mode 100644 index 0000000000..17f9cb6341 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/main.py @@ -0,0 +1,36 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import os + +from agent_framework import Agent +from agent_framework.foundry import FoundryChatClient +from agent_framework_foundry_hosting import InvocationsHostServer +from azure.identity import AzureCliCredential +from dotenv import load_dotenv + +# Load environment variables from .env file +load_dotenv() + + +def main(): + client = FoundryChatClient( + project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], + model=os.environ["FOUNDRY_MODEL"], + credential=AzureCliCredential(), + ) + + agent = Agent( + client=client, + instructions="You are a friendly assistant. Keep your answers brief.", + # History will be managed by the hosting infrastructure, thus there + # is no need to store history by the service. Learn more at: + # https://developers.openai.com/api/reference/resources/responses/methods/create + default_options={"store": False}, + ) + + server = InvocationsHostServer(agent) + server.run() + + +if __name__ == "__main__": + main() diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/requirements.txt new file mode 100644 index 0000000000..f7dc62f3e3 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/requirements.txt @@ -0,0 +1,2 @@ +agent-framework +agent-framework-foundry-hosting \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/.dockerignore new file mode 100644 index 0000000000..008e6e6616 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/.dockerignore @@ -0,0 +1,6 @@ +.venv +__pycache__ +*.pyc +*.pyo +*.pyd +.Python \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/Dockerfile 
b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/Dockerfile new file mode 100644 index 0000000000..eaffb94f19 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.12-slim + +WORKDIR /app + +COPY . user_agent/ +WORKDIR /app/user_agent + +RUN if [ -f requirements.txt ]; then \ +        pip install -r requirements.txt; \ +    else \ +        echo "No requirements.txt found"; \ +    fi + +EXPOSE 8088 + +CMD ["python", "main.py"] \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/README.md b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/README.md new file mode 100644 index 0000000000..e6eaa7c6c7 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/README.md @@ -0,0 +1,35 @@ +# Basic example of hosting an agent with the `responses` API + +## Running the server locally + +Run the following command to start the server: + +```bash +python main.py +``` + +## Interacting with the agent + +Send a POST request to the server with a JSON body containing an "input" field to interact with the agent. For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "Hi"}' +``` + +The server will respond with a JSON object containing the response text and a response ID. You can use this response ID to continue the conversation in subsequent requests. + +## Multi-turn conversation + +To have a multi-turn conversation with the agent, include the previous response id in the request body.
For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "How are you?", "previous_response_id": "REPLACE_WITH_PREVIOUS_RESPONSE_ID"}' +``` + +## Deploying to Foundry + +TODO + +## Using the deployed agent in Agent Framework + +TODO diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/agent.manifest.yaml new file mode 100644 index 0000000000..27bb7630e8 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/agent.manifest.yaml @@ -0,0 +1,15 @@ +name: agent-framework-agent-basic +description: > + A basic Agent Framework agent hosted by Foundry. +metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Streaming +template: + name: agent-framework-agent-basic + kind: hosted + protocols: + - protocol: responses + version: v0.1.0 \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/agent.yaml new file mode 100644 index 0000000000..b42f31863a --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/agent.yaml @@ -0,0 +1,8 @@ +kind: hosted +name: agent-framework-agent-basic +protocols: + - protocol: responses + version: v0.1.0 +resources: + cpu: "0.25" + memory: 0.5Gi \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/main.py new file mode 100644 index 0000000000..0be716558b --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/main.py @@ -0,0 +1,37 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import os + +from agent_framework import Agent +from agent_framework.foundry import FoundryChatClient +from agent_framework_foundry_hosting import ResponsesHostServer +from azure.ai.agentserver.responses import InMemoryResponseProvider +from azure.identity import AzureCliCredential +from dotenv import load_dotenv + +# Load environment variables from .env file +load_dotenv() + + +def main(): + client = FoundryChatClient( + project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], + model=os.environ["FOUNDRY_MODEL"], + credential=AzureCliCredential(), + ) + + agent = Agent( + client=client, + instructions="You are a friendly assistant. Keep your answers brief.", + # History will be managed by the hosting infrastructure, thus there + # is no need to store history by the service. Learn more at: + # https://developers.openai.com/api/reference/resources/responses/methods/create + default_options={"store": False}, + ) + + server = ResponsesHostServer(agent, provider=InMemoryResponseProvider()) + server.run() + + +if __name__ == "__main__": + main() diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/requirements.txt new file mode 100644 index 0000000000..f7dc62f3e3 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/requirements.txt @@ -0,0 +1,2 @@ +agent-framework +agent-framework-foundry-hosting \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/.dockerignore new file mode 100644 index 0000000000..008e6e6616 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/.dockerignore @@ -0,0 +1,6 @@ +.venv +__pycache__ +*.pyc +*.pyo +*.pyd +.Python \ No newline at end of file diff 
--git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/Dockerfile new file mode 100644 index 0000000000..eaffb94f19 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.12-slim + +WORKDIR /app + +COPY . user_agent/ +WORKDIR /app/user_agent + +RUN if [ -f requirements.txt ]; then \ + pip install -r requirements.txt; \ + else \ + echo "No requirements.txt found"; \ + fi + +EXPOSE 8088 + +CMD ["python", "main.py"] \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/README.md b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/README.md new file mode 100644 index 0000000000..b262afa7ab --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/README.md @@ -0,0 +1,13 @@ +# Basic example of hosting an agent with the `responses` API and local tools + +Run the following command to start the server: + +```bash +python main.py +``` + +Send a POST request to the server with a JSON body containing a "message" field to interact with the agent. 
For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "What is the weather in Seattle?"}' +``` diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/agent.manifest.yaml new file mode 100644 index 0000000000..84a6fd95f5 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/agent.manifest.yaml @@ -0,0 +1,15 @@ +name: agent-framework-agent-with-local-tools +description: > +  An Agent Framework agent with local tools hosted by Foundry. +metadata: +  tags: +    - AI Agent Hosting +    - Azure AI AgentServer +    - Responses Protocol +    - Streaming +template: +  name: agent-framework-agent-with-local-tools +  kind: hosted +  protocols: +    - protocol: responses +      version: v0.1.0 \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/agent.yaml new file mode 100644 index 0000000000..a2642beb46 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/agent.yaml @@ -0,0 +1,8 @@ +kind: hosted +name: agent-framework-agent-with-local-tools +protocols: +  - protocol: responses +    version: v0.1.0 +resources: +  cpu: "0.25" +  memory: 0.5Gi \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/main.py new file mode 100644 index 0000000000..7c8b3f6dc8 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/main.py @@ -0,0 +1,50 @@ +# Copyright (c) Microsoft. All rights reserved.
+ +import os +from random import randint + +from agent_framework import Agent, tool +from agent_framework.foundry import FoundryChatClient +from agent_framework_foundry_hosting import ResponsesHostServer +from azure.ai.agentserver.responses import InMemoryResponseProvider +from azure.identity import AzureCliCredential +from dotenv import load_dotenv +from pydantic import Field +from typing_extensions import Annotated + +# Load environment variables from .env file +load_dotenv() + + +@tool(approval_mode="never_require") +def get_weather( + location: Annotated[str, Field(description="The location to get the weather for.")], +) -> str: + """Get the weather for a given location.""" + conditions = ["sunny", "cloudy", "rainy", "stormy"] + return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." + + +def main(): + client = FoundryChatClient( + project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], + model=os.environ["FOUNDRY_MODEL"], + credential=AzureCliCredential(), + ) + + agent = Agent( + client=client, + instructions="You are a friendly assistant. Keep your answers brief.", + tools=[get_weather], + # History will be managed by the hosting infrastructure, thus there + # is no need to store history by the service. 
Learn more at: + # https://developers.openai.com/api/reference/resources/responses/methods/create + default_options={"store": False}, + ) + + server = ResponsesHostServer(agent, provider=InMemoryResponseProvider()) + server.run() + + +if __name__ == "__main__": + main() diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/requirements.txt new file mode 100644 index 0000000000..f7dc62f3e3 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/requirements.txt @@ -0,0 +1,2 @@ +agent-framework +agent-framework-foundry-hosting \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/.dockerignore new file mode 100644 index 0000000000..008e6e6616 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/.dockerignore @@ -0,0 +1,6 @@ +.venv +__pycache__ +*.pyc +*.pyo +*.pyd +.Python \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/Dockerfile new file mode 100644 index 0000000000..eaffb94f19 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.12-slim + +WORKDIR /app + +COPY . 
user_agent/ +WORKDIR /app/user_agent + +RUN if [ -f requirements.txt ]; then \ + pip install -r requirements.txt; \ + else \ + echo "No requirements.txt found"; \ + fi + +EXPOSE 8088 + +CMD ["python", "main.py"] \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/README.md b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/README.md new file mode 100644 index 0000000000..5091871aa8 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/README.md @@ -0,0 +1,13 @@ +# Basic example of hosting an agent with the `responses` API and a remote MCP + +Run the following command to start the server: + +```bash +python main.py +``` + +Send a POST request to the server with a JSON body containing a "message" field to interact with the agent. For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "List all the repositories I own on GitHub."}' +``` diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/agent.manifest.yaml new file mode 100644 index 0000000000..daf7b10cd3 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/agent.manifest.yaml @@ -0,0 +1,15 @@ +name: agent-framework-agent-with-remote-mcp-tools +description: > + An Agent Framework agent with remote MCP tools hosted by Foundry. 
+metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Streaming +template: + name: agent-framework-agent-with-remote-mcp-tools + kind: hosted + protocols: + - protocol: responses + version: v0.1.0 \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/agent.yaml new file mode 100644 index 0000000000..9eaedced27 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/agent.yaml @@ -0,0 +1,8 @@ +kind: hosted +name: agent-framework-agent-with-remote-mcp-tools +protocols: + - protocol: responses + version: v0.1.0 +resources: + cpu: "0.25" + memory: 0.5Gi \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/main.py new file mode 100644 index 0000000000..9a244f686f --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/main.py @@ -0,0 +1,53 @@ +# Copyright (c) Microsoft. All rights reserved. + +import os + +from agent_framework import Agent +from agent_framework.foundry import FoundryChatClient +from agent_framework_foundry_hosting import ResponsesHostServer +from azure.ai.agentserver.responses import InMemoryResponseProvider +from azure.identity import AzureCliCredential +from dotenv import load_dotenv + +# Load environment variables from .env file +load_dotenv() + + +def main(): + client = FoundryChatClient( + project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], + model=os.environ["FOUNDRY_MODEL"], + credential=AzureCliCredential(), + ) + + github_pat = os.getenv("GITHUB_PAT") + if not github_pat: + raise ValueError( + "GITHUB_PAT environment variable must be set. 
Create a token at https://github.com/settings/tokens" + ) + + github_mcp_tool = client.get_mcp_tool( + name="GitHub", + url="https://api.githubcopilot.com/mcp/", + headers={ + "Authorization": f"Bearer {github_pat}", + }, + approval_mode="never_require", + ) + + agent = Agent( + client=client, + instructions="You are a friendly assistant. Keep your answers brief.", + tools=[github_mcp_tool], + # History will be managed by the hosting infrastructure, thus there + # is no need to store history by the service. Learn more at: + # https://developers.openai.com/api/reference/resources/responses/methods/create + default_options={"store": False}, + ) + + server = ResponsesHostServer(agent, provider=InMemoryResponseProvider()) + server.run() + + +if __name__ == "__main__": + main() diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/requirements.txt new file mode 100644 index 0000000000..f7dc62f3e3 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/requirements.txt @@ -0,0 +1,2 @@ +agent-framework +agent-framework-foundry-hosting \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/.dockerignore new file mode 100644 index 0000000000..008e6e6616 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/.dockerignore @@ -0,0 +1,6 @@ +.venv +__pycache__ +*.pyc +*.pyo +*.pyd +.Python \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/Dockerfile new file mode 100644 index 0000000000..eaffb94f19 --- /dev/null +++ 
b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.12-slim + +WORKDIR /app + +COPY . user_agent/ +WORKDIR /app/user_agent + +RUN if [ -f requirements.txt ]; then \ + pip install -r requirements.txt; \ + else \ + echo "No requirements.txt found"; \ + fi + +EXPOSE 8088 + +CMD ["python", "main.py"] \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/README.md b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/README.md new file mode 100644 index 0000000000..75d87c3fcb --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/README.md @@ -0,0 +1,13 @@ +# Basic example of hosting an agent with the `responses` API and a workflow + +Run the following command to start the server: + +```bash +python main.py +``` + +Send a POST request to the server with a JSON body containing a "message" field to interact with the agent. For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "Create a slogan for a new electric SUV that is affordable and fun to drive."}' +``` diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/agent.manifest.yaml new file mode 100644 index 0000000000..027ca3713b --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/agent.manifest.yaml @@ -0,0 +1,15 @@ +name: agent-framework-workflows +description: > + An Agent Framework workflow hosted by Foundry. 
+metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Streaming +template: + name: agent-framework-workflows + kind: hosted + protocols: + - protocol: responses + version: v0.1.0 \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/agent.yaml new file mode 100644 index 0000000000..ee4fedc2ea --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/agent.yaml @@ -0,0 +1,8 @@ +kind: hosted +name: agent-framework-workflows +protocols: + - protocol: responses + version: v0.1.0 +resources: + cpu: "0.25" + memory: 0.5Gi \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/main.py new file mode 100644 index 0000000000..8965dc6a21 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/main.py @@ -0,0 +1,74 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import os + +from agent_framework import Agent +from agent_framework.foundry import FoundryChatClient +from agent_framework.orchestrations import GroupChatBuilder, GroupChatState +from agent_framework_foundry_hosting import ResponsesHostServer +from azure.ai.agentserver.responses import InMemoryResponseProvider +from azure.identity import AzureCliCredential +from dotenv import load_dotenv + +# Load environment variables from .env file +load_dotenv() + + +def round_robin_selector(state: GroupChatState) -> str: + """A round-robin selector function that picks the next speaker based on the current round index.""" + + participant_names = list(state.participants.keys()) + return participant_names[state.current_round % len(participant_names)] + + +def main(): + client = FoundryChatClient( + project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], + model=os.environ["FOUNDRY_MODEL"], + credential=AzureCliCredential(), + ) + + writer_agent = Agent( + client=client, + instructions=( + "You are an excellent content writer. You create new content and edit contents based on the feedback." + ), + name="writer", + # History will be managed by the hosting infrastructure, thus there + # is no need to store history by the service. Learn more at: + # https://developers.openai.com/api/reference/resources/responses/methods/create + default_options={"store": False}, + ) + + reviewer_agent = Agent( + client=client, + instructions=( + "You are an excellent content reviewer." + "Provide actionable feedback to the writer about the provided content." + "Provide the feedback in the most concise manner possible." + ), + name="reviewer", + # History will be managed by the hosting infrastructure, thus there + # is no need to store history by the service. 
Learn more at: + # https://developers.openai.com/api/reference/resources/responses/methods/create + default_options={"store": False}, + ) + + workflow_agent = ( + GroupChatBuilder( + participants=[writer_agent, reviewer_agent], + # Set a hard termination condition to stop after 4 messages: + # User message + writer message + reviewer message + writer message + termination_condition=lambda conversation: len(conversation) >= 4, + selection_func=round_robin_selector, + ) + .build() + .as_agent() + ) + + server = ResponsesHostServer(workflow_agent, provider=InMemoryResponseProvider()) + server.run() + + +if __name__ == "__main__": + main() diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/requirements.txt new file mode 100644 index 0000000000..f7dc62f3e3 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/requirements.txt @@ -0,0 +1,2 @@ +agent-framework +agent-framework-foundry-hosting \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/.dockerignore new file mode 100644 index 0000000000..008e6e6616 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/.dockerignore @@ -0,0 +1,6 @@ +.venv +__pycache__ +*.pyc +*.pyo +*.pyd +.Python \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/Dockerfile new file mode 100644 index 0000000000..845d325e7c --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.12-slim + 
+WORKDIR /app/user_agent + +COPY wheels/ /tmp/wheels/ +COPY requirements.txt . +RUN pip install --no-cache-dir --find-links /tmp/wheels/ -r requirements.txt && rm -rf /tmp/wheels/ + +COPY . . + +RUN useradd -r appuser +USER appuser + +EXPOSE 8088 + +CMD ["python", "main.py"] \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/README.md b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/README.md new file mode 100644 index 0000000000..73f1ad05a9 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/README.md @@ -0,0 +1,43 @@ +# Agent Framework Agent with Local Shell + +> Note: This agent can execute local shell commands. We recommend running it in an isolated environment for security reasons. + +## Running the server in a Docker container + +Build the Docker image: + +```bash +docker build -t agent-framework-agent-with-local-shell . +``` + +Run the Docker container: + +```bash +docker run -p 8088:8088 --env-file .env agent-framework-agent-with-local-shell +``` + +## Interacting with the agent + +Send a POST request to the server with a JSON body containing a "message" field to interact with the agent. For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "Hi"}' +``` + +The server will respond with a JSON object containing the response text and a response ID. You can use this response ID to continue the conversation in subsequent requests. + +## Multi-turn conversation + +To have a multi-turn conversation with the agent, include the previous response id in the request body. 
For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "How are you?", "previous_response_id": "REPLACE_WITH_PREVIOUS_RESPONSE_ID"}' +``` + +## Deploying to Foundry + +TODO + +## Using the deployed agent in Agent Framework + +TODO diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/agent.manifest.yaml new file mode 100644 index 0000000000..32f133d12d --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/agent.manifest.yaml @@ -0,0 +1,15 @@ +name: agent-framework-agent-with-local-shell +description: > + An Agent Framework agent that can execute local shell commands hosted by Foundry. +metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Streaming +template: + name: agent-framework-agent-with-local-shell + kind: hosted + protocols: + - protocol: responses + version: v0.1.0 \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/agent.yaml new file mode 100644 index 0000000000..6c0a04b83b --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/agent.yaml @@ -0,0 +1,8 @@ +kind: hosted +name: agent-framework-agent-with-local-shell +protocols: + - protocol: responses + version: v0.1.0 +resources: + cpu: "0.25" + memory: 0.5Gi \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/main.py new file mode 100644 index 0000000000..094eb766f4 --- /dev/null +++ 
b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/main.py @@ -0,0 +1,63 @@ +# Copyright (c) Microsoft. All rights reserved. + +import os +import subprocess + +from agent_framework import Agent, tool +from agent_framework.foundry import FoundryChatClient +from agent_framework_foundry_hosting import ResponsesHostServer +from azure.ai.agentserver.responses import InMemoryResponseProvider +from azure.identity import AzureCliCredential +from dotenv import load_dotenv + +# Load environment variables from .env file +load_dotenv() + + +@tool(approval_mode="always_require") +def run_bash(command: str) -> str: + """Execute a shell command locally and return stdout, stderr, and exit code.""" + try: + result = subprocess.run( + command, + shell=True, + capture_output=True, + text=True, + timeout=30, + ) + parts: list[str] = [] + if result.stdout: + parts.append(result.stdout) + if result.stderr: + parts.append(f"stderr: {result.stderr}") + parts.append(f"exit_code: {result.returncode}") + return "\n".join(parts) + except subprocess.TimeoutExpired: + return "Command timed out after 30 seconds" + except Exception as e: + return f"Error executing command: {e}" + + +def main(): + client = FoundryChatClient( + project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], + model=os.environ["FOUNDRY_MODEL"], + credential=AzureCliCredential(), + ) + + agent = Agent( + client=client, + instructions="You are a friendly assistant. Keep your answers brief.", + tools=[run_bash], + # History will be managed by the hosting infrastructure, thus there + # is no need to store history by the service. 
Learn more at: + # https://developers.openai.com/api/reference/resources/responses/methods/create + default_options={"store": False}, + ) + + server = ResponsesHostServer(agent, provider=InMemoryResponseProvider()) + server.run() + + +if __name__ == "__main__": + main() diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/requirements.txt new file mode 100644 index 0000000000..61b5ba14f7 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/requirements.txt @@ -0,0 +1,2 @@ +agent-framework-core +agent-framework-foundry-hosting \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/.dockerignore new file mode 100644 index 0000000000..008e6e6616 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/.dockerignore @@ -0,0 +1,6 @@ +.venv +__pycache__ +*.pyc +*.pyo +*.pyd +.Python \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/Dockerfile new file mode 100644 index 0000000000..eaffb94f19 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.12-slim + +WORKDIR /app + +COPY . 
user_agent/ +WORKDIR /app/user_agent + +RUN if [ -f requirements.txt ]; then \ + pip install -r requirements.txt; \ + else \ + echo "No requirements.txt found"; \ + fi + +EXPOSE 8088 + +CMD ["python", "main.py"] \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/README.md b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/README.md new file mode 100644 index 0000000000..195d10e966 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/README.md @@ -0,0 +1,35 @@ +# Agent Framework Agent with Evaluation + +## Running the server locally + +Run the following command to start the server: + +```bash +python main.py +``` + +## Interacting with the agent + +Send a POST request to the server with a JSON body containing a "message" field to interact with the agent. For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "Hi"}' +``` + +The server will respond with a JSON object containing the response text and a response ID. You can use this response ID to continue the conversation in subsequent requests. + +## Multi-turn conversation + +To have a multi-turn conversation with the agent, include the previous response id in the request body. 
For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "How are you?", "previous_response_id": "REPLACE_WITH_PREVIOUS_RESPONSE_ID"}' +``` + +## Deploying to Foundry + +TODO + +## Using the deployed agent in Agent Framework + +TODO diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/agent.manifest.yaml new file mode 100644 index 0000000000..a6cc8af7fd --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/agent.manifest.yaml @@ -0,0 +1,15 @@ +name: agent-framework-agent-with-eval +description: > + An Agent Framework agent that is evaluated on each response, hosted by Foundry. +metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Streaming +template: + name: agent-framework-agent-with-eval + kind: hosted + protocols: + - protocol: responses + version: v0.1.0 \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/agent.yaml new file mode 100644 index 0000000000..7fa752ca8b --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/agent.yaml @@ -0,0 +1,8 @@ +kind: hosted +name: agent-framework-agent-with-eval +protocols: + - protocol: responses + version: v0.1.0 +resources: + cpu: "0.25" + memory: 0.5Gi \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/main.py new file mode 100644 index 0000000000..7c8b3f6dc8 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/main.py @@ -0,0 +1,50 @@ +# Copyright (c) Microsoft.
All rights reserved. + +import os +from random import randint + +from agent_framework import Agent, tool +from agent_framework.foundry import FoundryChatClient +from agent_framework_foundry_hosting import ResponsesHostServer +from azure.ai.agentserver.responses import InMemoryResponseProvider +from azure.identity import AzureCliCredential +from dotenv import load_dotenv +from pydantic import Field +from typing_extensions import Annotated + +# Load environment variables from .env file +load_dotenv() + + +@tool(approval_mode="never_require") +def get_weather( + location: Annotated[str, Field(description="The location to get the weather for.")], +) -> str: + """Get the weather for a given location.""" + conditions = ["sunny", "cloudy", "rainy", "stormy"] + return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." + + +def main(): + client = FoundryChatClient( + project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], + model=os.environ["FOUNDRY_MODEL"], + credential=AzureCliCredential(), + ) + + agent = Agent( + client=client, + instructions="You are a friendly assistant. Keep your answers brief.", + tools=[get_weather], + # History will be managed by the hosting infrastructure, thus there + # is no need to store history by the service. 
Learn more at: + # https://developers.openai.com/api/reference/resources/responses/methods/create + default_options={"store": False}, + ) + + server = ResponsesHostServer(agent, provider=InMemoryResponseProvider()) + server.run() + + +if __name__ == "__main__": + main() diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/requirements.txt new file mode 100644 index 0000000000..f7dc62f3e3 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/requirements.txt @@ -0,0 +1,2 @@ +agent-framework +agent-framework-foundry-hosting \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/.dockerignore new file mode 100644 index 0000000000..008e6e6616 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/.dockerignore @@ -0,0 +1,6 @@ +.venv +__pycache__ +*.pyc +*.pyo +*.pyd +.Python \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/Dockerfile new file mode 100644 index 0000000000..eaffb94f19 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.12-slim + +WORKDIR /app + +COPY . 
user_agent/ +WORKDIR /app/user_agent + +RUN if [ -f requirements.txt ]; then \ + pip install -r requirements.txt; \ + else \ + echo "No requirements.txt found"; \ + fi + +EXPOSE 8088 + +CMD ["python", "main.py"] \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/README.md b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/README.md new file mode 100644 index 0000000000..e9987de36d --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/README.md @@ -0,0 +1,35 @@ +# Agent Framework Agent with Foundry Memory + +## Running the server locally + +Run the following command to start the server: + +```bash +python main.py +``` + +## Interacting with the agent + +Send a POST request to the server with a JSON body containing a "message" field to interact with the agent. For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "Hi"}' +``` + +The server will respond with a JSON object containing the response text and a response ID. You can use this response ID to continue the conversation in subsequent requests. + +## Multi-turn conversation + +To have a multi-turn conversation with the agent, include the previous response id in the request body. 
For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "How are you?", "previous_response_id": "REPLACE_WITH_PREVIOUS_RESPONSE_ID"}' +``` + +## Deploying to Foundry + +TODO + +## Using the deployed agent in Agent Framework + +TODO diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/agent.manifest.yaml new file mode 100644 index 0000000000..b0cf8c5fab --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/agent.manifest.yaml @@ -0,0 +1,15 @@ +name: agent-framework-agent-with-foundry-memory +description: > + An Agent Framework agent with memory support hosted by Foundry. +metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Streaming +template: + name: agent-framework-agent-with-foundry-memory + kind: hosted + protocols: + - protocol: responses + version: v0.1.0 \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/agent.yaml new file mode 100644 index 0000000000..90c9a26406 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/agent.yaml @@ -0,0 +1,8 @@ +kind: hosted +name: agent-framework-agent-with-foundry-memory +protocols: + - protocol: responses + version: v0.1.0 +resources: + cpu: "0.25" + memory: 0.5Gi \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/main.py new file mode 100644 index 0000000000..1a46bdac3c --- /dev/null +++ 
# Copyright (c) Microsoft. All rights reserved.

import asyncio
import logging
import os
from datetime import datetime, timezone

from agent_framework import Agent
from agent_framework.foundry import FoundryChatClient, FoundryMemoryProvider
from agent_framework_foundry_hosting import ResponsesHostServer
from azure.ai.agentserver.responses import InMemoryResponseProvider
from azure.ai.projects.aio import AIProjectClient
from azure.ai.projects.models import (
    MemoryStoreDefaultDefinition,
    MemoryStoreDefaultOptions,
)
from azure.identity import AzureCliCredential
from dotenv import load_dotenv

# Load environment variables from .env file
load_dotenv()

logging.basicConfig(level=logging.INFO)


async def _create_memory_store(project_client: AIProjectClient) -> FoundryMemoryProvider:
    """Create a date-stamped default memory store and return a provider bound to it.

    Args:
        project_client: Client for the Foundry project that will host the store.

    Returns:
        A FoundryMemoryProvider scoped to the "demo" user that updates memories
        immediately after each interaction.
    """
    memory_store_name = f"hosted_agent_memory_{datetime.now(timezone.utc).strftime('%Y%m%d')}"
    options = MemoryStoreDefaultOptions(
        chat_summary_enabled=True,
        user_profile_enabled=True,
        user_profile_details=(
            "Avoid irrelevant or sensitive data, such as age, financials, precise location, and credentials"
        ),
    )
    memory_store_definition = MemoryStoreDefaultDefinition(
        chat_model=os.environ["FOUNDRY_MODEL"],
        embedding_model=os.environ["AZURE_OPENAI_EMBEDDING_MODEL"],
        options=options,
    )
    memory_store = await project_client.beta.memory_stores.create(
        name=memory_store_name,
        description="Memory store for Agent Framework with FoundryMemoryProvider",
        definition=memory_store_definition,
    )

    return FoundryMemoryProvider(
        project_client=project_client,
        memory_store_name=memory_store.name,
        # Scope memories to a specific user, if not set, the session_id
        # will be used as scope, which means memories are only shared within the same session
        scope="demo",
        # Do not wait to update memories after each interaction (for demo purposes)
        # In production, consider setting a delay to batch updates and reduce costs
        update_delay=0,
    )


async def _delete_memory_store(project_client: AIProjectClient, memory_store_name: str) -> None:
    """Delete the memory store created by this sample so the demo leaves no residue."""
    await project_client.beta.memory_stores.delete(name=memory_store_name)


async def main() -> None:
    """Host a memory-backed Agent Framework agent behind a Responses protocol server."""
    client = FoundryChatClient(
        project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"],
        model=os.environ["FOUNDRY_MODEL"],
        credential=AzureCliCredential(),
    )

    # Create the memory store
    memory_provider = await _create_memory_store(client.project_client)

    # Everything after store creation runs under try/finally so the store is
    # deleted even if agent or server construction raises (previously a
    # construction failure leaked the freshly created memory store).
    try:
        agent = Agent(
            client=client,
            instructions="You are a friendly assistant. Keep your answers brief.",
            context_providers=[memory_provider],
            # History will be managed by the hosting infrastructure, thus there
            # is no need to store history by the service. Learn more at:
            # https://developers.openai.com/api/reference/resources/responses/methods/create
            default_options={"store": False},
        )

        server = ResponsesHostServer(agent, provider=InMemoryResponseProvider())
        await server.run_async()
    finally:
        await _delete_memory_store(client.project_client, memory_provider.memory_store_name)


if __name__ == "__main__":
    asyncio.run(main())
"agent-framework-foundry-local", "agent-framework-github-copilot", "agent-framework-lab", @@ -343,6 +344,7 @@ all = [ { name = "agent-framework-devui", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "agent-framework-durabletask", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "agent-framework-foundry", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "agent-framework-foundry-hosting", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "agent-framework-foundry-local", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "agent-framework-github-copilot", marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "agent-framework-lab", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -371,6 +373,7 @@ requires-dist = [ { name = "agent-framework-devui", marker = "extra == 'all'", editable = "packages/devui" }, { name = "agent-framework-durabletask", marker = "extra == 'all'", editable = "packages/durabletask" }, { name = "agent-framework-foundry", marker = "extra == 'all'", editable = "packages/foundry" }, + { name = "agent-framework-foundry-hosting", marker = "extra == 'all'", editable = "packages/foundry_hosting" }, { name = "agent-framework-foundry-local", marker = "extra == 'all'", editable = "packages/foundry_local" }, { name = "agent-framework-github-copilot", marker = "python_full_version >= '3.11' and extra == 'all'", editable = "packages/github_copilot" }, { name = "agent-framework-lab", marker = "extra == 'all'", editable = "packages/lab" }, @@ -497,6 +500,25 @@ requires-dist = [ { name = 
"azure-ai-projects", specifier = ">=2.0.0,<3.0" }, ] +[[package]] +name = "agent-framework-foundry-hosting" +version = "1.0.0b260402" +source = { editable = "packages/foundry_hosting" } +dependencies = [ + { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "azure-ai-agentserver-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "azure-ai-agentserver-invocations", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "azure-ai-agentserver-responses", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] + +[package.metadata] +requires-dist = [ + { name = "agent-framework-core", editable = "packages/core" }, + { name = "azure-ai-agentserver-core", git = "https://github.com/Azure/azure-sdk-for-python.git?subdirectory=sdk%2Fagentserver%2Fazure-ai-agentserver-core&branch=agentserver%2Fresponses" }, + { name = "azure-ai-agentserver-invocations", git = "https://github.com/Azure/azure-sdk-for-python.git?subdirectory=sdk%2Fagentserver%2Fazure-ai-agentserver-invocations&branch=agentserver%2Fresponses" }, + { name = "azure-ai-agentserver-responses", git = "https://github.com/Azure/azure-sdk-for-python.git?subdirectory=sdk%2Fagentserver%2Fazure-ai-agentserver-responses&branch=agentserver%2Fresponses" }, +] + [[package]] name = "agent-framework-foundry-local" version = "1.0.0b260402" @@ -996,6 +1018,37 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/64/b4/17d4b0b2a2dc85a6df63d1157e028ed19f90d4cd97c36717afef2bc2f395/attrs-26.1.0-py3-none-any.whl", hash = "sha256:c647aa4a12dfbad9333ca4e71fe62ddc36f4e63b2d260a37a8b83d2f043ac309", size = 67548, upload-time = "2026-03-19T14:22:23.645Z" }, ] +[[package]] +name = "azure-ai-agentserver-core" +version = "2.0.0b1" +source = { git = 
"https://github.com/Azure/azure-sdk-for-python.git?subdirectory=sdk%2Fagentserver%2Fazure-ai-agentserver-core&branch=agentserver%2Fresponses#43579f686f51ebed23b066d06c90a544c0070a0b" } +dependencies = [ + { name = "azure-monitor-opentelemetry-exporter", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "hypercorn", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "opentelemetry-api", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "opentelemetry-exporter-otlp-proto-grpc", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "opentelemetry-sdk", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "starlette", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] + +[[package]] +name = "azure-ai-agentserver-invocations" +version = "1.0.0b1" +source = { git = "https://github.com/Azure/azure-sdk-for-python.git?subdirectory=sdk%2Fagentserver%2Fazure-ai-agentserver-invocations&branch=agentserver%2Fresponses#43579f686f51ebed23b066d06c90a544c0070a0b" } +dependencies = [ + { name = "azure-ai-agentserver-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] + +[[package]] +name = "azure-ai-agentserver-responses" +version = "1.0.0b1" +source = { git = "https://github.com/Azure/azure-sdk-for-python.git?subdirectory=sdk%2Fagentserver%2Fazure-ai-agentserver-responses&branch=agentserver%2Fresponses#43579f686f51ebed23b066d06c90a544c0070a0b" } +dependencies = [ + { name = "azure-ai-agentserver-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "azure-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "isodate", 
marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] + [[package]] name = "azure-ai-inference" version = "1.0.0b9" @@ -1108,6 +1161,23 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/49/9a/417b3a533e01953a7c618884df2cb05a71e7b68bdbce4fbdb62349d2a2e8/azure_identity-1.25.3-py3-none-any.whl", hash = "sha256:f4d0b956a8146f30333e071374171f3cfa7bdb8073adb8c3814b65567aa7447c", size = 192138, upload-time = "2026-03-13T01:12:22.951Z" }, ] +[[package]] +name = "azure-monitor-opentelemetry-exporter" +version = "1.0.0b51" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "azure-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "azure-identity", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "msrest", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "opentelemetry-api", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "opentelemetry-sdk", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "psutil", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bc/a4/a6cd2d389bc1009300bcd57c9e2ace4b7e7ae1e5dc0bda415ee803629cf2/azure_monitor_opentelemetry_exporter-1.0.0b51.tar.gz", hash = "sha256:a6171c34326bcd6216938bb40d715c15f1f22984ac1986fc97231336d8ac4c3c", size = 319837, upload-time = "2026-04-06T21:45:46.378Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ea/1a/6b0b7a6181b42709103a65a676c89fd5055cb1d1b281ebe10c49254a170f/azure_monitor_opentelemetry_exporter-1.0.0b51-py2.py3-none-any.whl", hash = "sha256:6572cac11f96e3b18ae1187cb35cf3b40d0004655dae8048896c41c765bea530", size = 242104, upload-time = 
"2026-04-06T21:45:47.856Z" }, +] + [[package]] name = "azure-search-documents" version = "11.7.0b2" @@ -1423,7 +1493,7 @@ name = "clr-loader" version = "0.2.10" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cffi", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "cffi", marker = "(python_full_version < '3.14' and sys_platform == 'darwin') or (python_full_version < '3.14' and sys_platform == 'linux') or (python_full_version < '3.14' and sys_platform == 'win32')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/18/24/c12faf3f61614b3131b5c98d3bf0d376b49c7feaa73edca559aeb2aee080/clr_loader-0.2.10.tar.gz", hash = "sha256:81f114afbc5005bafc5efe5af1341d400e22137e275b042a8979f3feb9fc9446", size = 83605, upload-time = "2026-01-03T23:13:06.984Z" } wheels = [ @@ -2668,6 +2738,25 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a9/ae/8a3a16ea4d202cb641b51d2681bdd3d482c1c592d7570b3fa264730829ce/huggingface_hub-1.8.0-py3-none-any.whl", hash = "sha256:d3eb5047bd4e33c987429de6020d4810d38a5bef95b3b40df9b17346b7f353f2", size = 625208, upload-time = "2026-03-25T16:01:26.603Z" }, ] +[[package]] +name = "hypercorn" +version = "0.18.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, + { name = "h11", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "h2", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "priority", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "taskgroup", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < 
'3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, + { name = "tomli", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, + { name = "typing-extensions", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, + { name = "wsproto", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/44/01/39f41a014b83dd5c795217362f2ca9071cf243e6a75bdcd6cd5b944658cc/hypercorn-0.18.0.tar.gz", hash = "sha256:d63267548939c46b0247dc8e5b45a9947590e35e64ee73a23c074aa3cf88e9da", size = 68420, upload-time = "2025-11-08T13:54:04.78Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/93/35/850277d1b17b206bd10874c8a9a3f52e059452fb49bb0d22cbb908f6038b/hypercorn-0.18.0-py3-none-any.whl", hash = "sha256:225e268f2c1c2f28f6d8f6db8f40cb8c992963610c5725e13ccfcddccb24b1cd", size = 61640, upload-time = "2025-11-08T13:54:03.202Z" }, +] + [[package]] name = "hyperframe" version = "6.1.0" @@ -3604,6 +3693,22 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5e/75/bd9b7bb966668920f06b200e84454c8f3566b102183bc55c5473d96cb2b9/msal_extensions-1.3.1-py3-none-any.whl", hash = "sha256:96d3de4d034504e969ac5e85bae8106c8373b5c6568e4c8fa7af2eca9dbe6bca", size = 20583, upload-time = "2025-03-14T23:51:03.016Z" }, ] +[[package]] +name = "msrest" +version = "0.7.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "azure-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "certifi", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" 
}, + { name = "isodate", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "requests", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "requests-oauthlib", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/68/77/8397c8fb8fc257d8ea0fa66f8068e073278c65f05acb17dcb22a02bfdc42/msrest-0.7.1.zip", hash = "sha256:6e7661f46f3afd88b75667b7187a92829924446c7ea1d169be8c4bb7eeb788b9", size = 175332, upload-time = "2022-06-13T22:41:25.111Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/15/cf/f2966a2638144491f8696c27320d5219f48a072715075d168b31d3237720/msrest-0.7.1-py3-none-any.whl", hash = "sha256:21120a810e1233e5e6cc7fe40b474eeb4ec6f757a15d7cf86702c369f9567c32", size = 85384, upload-time = "2022-06-13T22:41:22.42Z" }, +] + [[package]] name = "multidict" version = "6.7.1" @@ -4673,8 +4778,8 @@ name = "powerfx" version = "0.0.34" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cffi", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, - { name = "pythonnet", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "cffi", marker = "(python_full_version < '3.14' and sys_platform == 'darwin') or (python_full_version < '3.14' and sys_platform == 'linux') or (python_full_version < '3.14' and sys_platform == 'win32')" }, + { name = "pythonnet", marker = "(python_full_version < '3.14' and sys_platform == 'darwin') or (python_full_version < '3.14' and sys_platform == 'linux') or (python_full_version < '3.14' and sys_platform == 'win32')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/9f/fb/6c4bf87e0c74ca1c563921ce89ca1c5785b7576bca932f7255cdf81082a7/powerfx-0.0.34.tar.gz", hash = 
"sha256:956992e7afd272657ed16d80f4cad24ec95d9e4a79fb9dfa4a068a09e136af32", size = 3237555, upload-time = "2025-12-22T15:50:59.682Z" } wheels = [ @@ -4705,6 +4810,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/53/05/9cca1708bb8c65264124eb4b04251e0f65ce5bfc707080bb6b492d5a0df7/prek-0.3.8-py3-none-win_arm64.whl", hash = "sha256:a2614647aeafa817a5802ccb9561e92eedc20dcf840639a1b00826e2c2442515", size = 5190872, upload-time = "2026-03-23T08:23:29.463Z" }, ] +[[package]] +name = "priority" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f5/3c/eb7c35f4dcede96fca1842dac5f4f5d15511aa4b52f3a961219e68ae9204/priority-2.0.0.tar.gz", hash = "sha256:c965d54f1b8d0d0b19479db3924c7c36cf672dbf2aec92d43fbdaf4492ba18c0", size = 24792, upload-time = "2021-06-27T10:15:05.487Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5e/5f/82c8074f7e84978129347c2c6ec8b6c59f3584ff1a20bc3c940a3e061790/priority-2.0.0-py3-none-any.whl", hash = "sha256:6f8eefce5f3ad59baf2c080a664037bb4725cd0a790d53d59ab4059288faf6aa", size = 8946, upload-time = "2021-06-27T10:15:03.856Z" }, +] + [[package]] name = "propcache" version = "0.4.1" @@ -5341,7 +5455,7 @@ name = "pythonnet" version = "3.0.5" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "clr-loader", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "clr-loader", marker = "(python_full_version < '3.14' and sys_platform == 'darwin') or (python_full_version < '3.14' and sys_platform == 'linux') or (python_full_version < '3.14' and sys_platform == 'win32')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/9a/d6/1afd75edd932306ae9bd2c2d961d603dc2b52fcec51b04afea464f1f6646/pythonnet-3.0.5.tar.gz", hash = "sha256:48e43ca463941b3608b32b4e236db92d8d40db4c58a75ace902985f76dac21cf", size = 239212, upload-time = "2024-12-13T08:30:44.393Z" } wheels = [ @@ 
-5644,6 +5758,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d7/8e/7540e8a2036f79a125c1d2ebadf69ed7901608859186c856fa0388ef4197/requests-2.33.1-py3-none-any.whl", hash = "sha256:4e6d1ef462f3626a1f0a0a9c42dd93c63bad33f9f1c1937509b8c5c8718ab56a", size = 64947, upload-time = "2026-03-30T16:09:13.83Z" }, ] +[[package]] +name = "requests-oauthlib" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "oauthlib", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "requests", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/f2/05f29bc3913aea15eb670be136045bf5c5bbf4b99ecb839da9b422bb2c85/requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9", size = 55650, upload-time = "2024-03-22T20:32:29.939Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3b/5d/63d4ae3b9daea098d5d6f5da83984853c1bbacd5dc826764b249fe119d24/requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36", size = 24179, upload-time = "2024-03-22T20:32:28.055Z" }, +] + [[package]] name = "rich" version = "13.9.4" @@ -6351,6 +6478,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/99/55/db07de81b5c630da5cbf5c7df646580ca26dfaefa593667fc6f2fe016d2e/tabulate-0.10.0-py3-none-any.whl", hash = "sha256:f0b0622e567335c8fabaaa659f1b33bcb6ddfe2e496071b743aa113f8774f2d3", size = 39814, upload-time = "2026-03-04T18:55:31.284Z" }, ] +[[package]] +name = "taskgroup" +version = "0.2.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and 
sys_platform == 'win32')" }, + { name = "typing-extensions", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f0/8d/e218e0160cc1b692e6e0e5ba34e8865dbb171efeb5fc9a704544b3020605/taskgroup-0.2.2.tar.gz", hash = "sha256:078483ac3e78f2e3f973e2edbf6941374fbea81b9c5d0a96f51d297717f4752d", size = 11504, upload-time = "2025-01-03T09:24:13.761Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/b1/74babcc824a57904e919f3af16d86c08b524c0691504baf038ef2d7f655c/taskgroup-0.2.2-py2.py3-none-any.whl", hash = "sha256:e2c53121609f4ae97303e9ea1524304b4de6faf9eb2c9280c7f87976479a52fb", size = 14237, upload-time = "2025-01-03T09:24:11.41Z" }, +] + [[package]] name = "tau2" version = "0.0.1" @@ -7050,6 +7190,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/1f/f6/a933bd70f98e9cf3e08167fc5cd7aaaca49147e48411c0bd5ae701bb2194/wrapt-1.17.3-py3-none-any.whl", hash = "sha256:7171ae35d2c33d326ac19dd8facb1e82e5fd04ef8c6c0e394d7af55a55051c22", size = 23591, upload-time = "2025-08-12T05:53:20.674Z" }, ] +[[package]] +name = "wsproto" +version = "1.3.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "h11", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c7/79/12135bdf8b9c9367b8701c2c19a14c913c120b882d50b014ca0d38083c2c/wsproto-1.3.2.tar.gz", hash = "sha256:b86885dcf294e15204919950f666e06ffc6c7c114ca900b060d6e16293528294", size = 50116, upload-time = "2025-11-20T18:18:01.871Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/f5/10b68b7b1544245097b2a1b8238f66f2fc6dcaeb24ba5d917f52bd2eed4f/wsproto-1.3.2-py3-none-any.whl", hash = "sha256:61eea322cdf56e8cc904bd3ad7573359a242ba65688716b0710a5eb12beab584", 
size = 24405, upload-time = "2025-11-20T18:18:00.454Z" }, +] + [[package]] name = "yarl" version = "1.23.0"