diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 30be1b0e8f..21d3aa2ed0 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -32,7 +32,7 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v5
+ uses: actions/checkout@v6
with:
persist-credentials: false
diff --git a/.github/workflows/dotnet-build-and-test.yml b/.github/workflows/dotnet-build-and-test.yml
index 5b44073e3f..4a41b343aa 100644
--- a/.github/workflows/dotnet-build-and-test.yml
+++ b/.github/workflows/dotnet-build-and-test.yml
@@ -37,7 +37,7 @@ jobs:
outputs:
dotnetChanges: ${{ steps.filter.outputs.dotnet}}
steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@v6
- uses: dorny/paths-filter@v3
id: filter
with:
@@ -68,7 +68,7 @@ jobs:
runs-on: ${{ matrix.os }}
environment: ${{ matrix.environment }}
steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@v6
with:
persist-credentials: false
sparse-checkout: |
diff --git a/.github/workflows/dotnet-format.yml b/.github/workflows/dotnet-format.yml
index 757d877028..8d7c9febb7 100644
--- a/.github/workflows/dotnet-format.yml
+++ b/.github/workflows/dotnet-format.yml
@@ -30,7 +30,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v5
+ uses: actions/checkout@v6
with:
fetch-depth: 0
persist-credentials: false
diff --git a/.github/workflows/markdown-link-check.yml b/.github/workflows/markdown-link-check.yml
index 3b015fc6af..5c984c5796 100644
--- a/.github/workflows/markdown-link-check.yml
+++ b/.github/workflows/markdown-link-check.yml
@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-22.04
# check out the latest version of the code
steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@v6
with:
persist-credentials: false
diff --git a/.github/workflows/python-code-quality.yml b/.github/workflows/python-code-quality.yml
index dd4c0b57cf..176eb3db99 100644
--- a/.github/workflows/python-code-quality.yml
+++ b/.github/workflows/python-code-quality.yml
@@ -27,7 +27,7 @@ jobs:
env:
UV_PYTHON: ${{ matrix.python-version }}
steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Set up python and install the project
diff --git a/.github/workflows/python-docs.yml b/.github/workflows/python-docs.yml
index b2be4b6ad0..f962ec318f 100644
--- a/.github/workflows/python-docs.yml
+++ b/.github/workflows/python-docs.yml
@@ -24,7 +24,7 @@ jobs:
run:
working-directory: python
steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@v6
- name: Set up uv
uses: astral-sh/setup-uv@v7
with:
diff --git a/.github/workflows/python-lab-tests.yml b/.github/workflows/python-lab-tests.yml
index ae526cf962..f5cb504d04 100644
--- a/.github/workflows/python-lab-tests.yml
+++ b/.github/workflows/python-lab-tests.yml
@@ -24,7 +24,7 @@ jobs:
outputs:
pythonChanges: ${{ steps.filter.outputs.python}}
steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@v6
- uses: dorny/paths-filter@v3
id: filter
with:
@@ -59,7 +59,7 @@ jobs:
run:
working-directory: python
steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@v6
- name: Set up python and install the project
id: python-setup
diff --git a/.github/workflows/python-merge-tests.yml b/.github/workflows/python-merge-tests.yml
index a30b3c4ac3..66b9122726 100644
--- a/.github/workflows/python-merge-tests.yml
+++ b/.github/workflows/python-merge-tests.yml
@@ -28,7 +28,7 @@ jobs:
outputs:
pythonChanges: ${{ steps.filter.outputs.python}}
steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@v6
- uses: dorny/paths-filter@v3
id: filter
with:
@@ -75,7 +75,7 @@ jobs:
run:
working-directory: python
steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@v6
- name: Set up python and install the project
id: python-setup
uses: ./.github/actions/python-setup
@@ -135,7 +135,7 @@ jobs:
run:
working-directory: python
steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@v6
- name: Set up python and install the project
id: python-setup
uses: ./.github/actions/python-setup
diff --git a/.github/workflows/python-release.yml b/.github/workflows/python-release.yml
index 97f1ef2481..ba6e3689b0 100644
--- a/.github/workflows/python-release.yml
+++ b/.github/workflows/python-release.yml
@@ -23,7 +23,7 @@ jobs:
run:
working-directory: python
steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@v6
- name: Set up python and install the project
id: python-setup
uses: ./.github/actions/python-setup
diff --git a/.github/workflows/python-test-coverage-report.yml b/.github/workflows/python-test-coverage-report.yml
index 3da82cc943..fa36073fc6 100644
--- a/.github/workflows/python-test-coverage-report.yml
+++ b/.github/workflows/python-test-coverage-report.yml
@@ -19,7 +19,7 @@ jobs:
run:
working-directory: python
steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@v6
- name: Download coverage report
uses: actions/download-artifact@v6
with:
diff --git a/.github/workflows/python-test-coverage.yml b/.github/workflows/python-test-coverage.yml
index dd260ba5f6..6268e7d47d 100644
--- a/.github/workflows/python-test-coverage.yml
+++ b/.github/workflows/python-test-coverage.yml
@@ -20,7 +20,7 @@ jobs:
env:
UV_PYTHON: "3.10"
steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@v6
# Save the PR number to a file since the workflow_run event
# in the coverage report workflow does not have access to it
- name: Save PR number
diff --git a/.github/workflows/python-tests.yml b/.github/workflows/python-tests.yml
index 697a8ff4a7..07b9200a46 100644
--- a/.github/workflows/python-tests.yml
+++ b/.github/workflows/python-tests.yml
@@ -27,7 +27,7 @@ jobs:
run:
working-directory: python
steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@v6
- name: Set up python and install the project
id: python-setup
uses: ./.github/actions/python-setup
diff --git a/dotnet/Directory.Packages.props b/dotnet/Directory.Packages.props
index 89be7f3520..c7a051bf83 100644
--- a/dotnet/Directory.Packages.props
+++ b/dotnet/Directory.Packages.props
@@ -23,7 +23,7 @@
-
+
@@ -102,7 +102,7 @@
-
+
diff --git a/dotnet/agent-framework-dotnet.slnx b/dotnet/agent-framework-dotnet.slnx
index ec197f58c0..5e08a766f9 100644
--- a/dotnet/agent-framework-dotnet.slnx
+++ b/dotnet/agent-framework-dotnet.slnx
@@ -129,6 +129,7 @@
+
diff --git a/dotnet/samples/GettingStarted/AgentWithOpenAI/Agent_OpenAI_Step05_Conversation/Agent_OpenAI_Step05_Conversation.csproj b/dotnet/samples/GettingStarted/AgentWithOpenAI/Agent_OpenAI_Step05_Conversation/Agent_OpenAI_Step05_Conversation.csproj
new file mode 100644
index 0000000000..eeda3eef6f
--- /dev/null
+++ b/dotnet/samples/GettingStarted/AgentWithOpenAI/Agent_OpenAI_Step05_Conversation/Agent_OpenAI_Step05_Conversation.csproj
@@ -0,0 +1,15 @@
+
+
+
+ Exe
+ net10.0
+
+ enable
+ enable
+
+
+
+
+
+
+
diff --git a/dotnet/samples/GettingStarted/AgentWithOpenAI/Agent_OpenAI_Step05_Conversation/Program.cs b/dotnet/samples/GettingStarted/AgentWithOpenAI/Agent_OpenAI_Step05_Conversation/Program.cs
new file mode 100644
index 0000000000..9f81a27dda
--- /dev/null
+++ b/dotnet/samples/GettingStarted/AgentWithOpenAI/Agent_OpenAI_Step05_Conversation/Program.cs
@@ -0,0 +1,98 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+// This sample demonstrates how to maintain conversation state using the OpenAIResponseClientAgent
+// and AgentThread. By passing the same thread to multiple agent invocations, the agent
+// automatically maintains the conversation history, allowing the AI model to understand
+// context from previous exchanges.
+
+using System.ClientModel;
+using System.ClientModel.Primitives;
+using System.Text.Json;
+using Microsoft.Agents.AI;
+using Microsoft.Extensions.AI;
+using OpenAI;
+using OpenAI.Chat;
+using OpenAI.Conversations;
+
+string apiKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY") ?? throw new InvalidOperationException("OPENAI_API_KEY is not set.");
+string model = Environment.GetEnvironmentVariable("OPENAI_MODEL") ?? "gpt-4o-mini";
+
+// Create a ConversationClient directly from OpenAIClient
+OpenAIClient openAIClient = new(apiKey);
+ConversationClient conversationClient = openAIClient.GetConversationClient();
+
+// Create an agent directly from the OpenAIResponseClient using OpenAIResponseClientAgent
+ChatClientAgent agent = new(openAIClient.GetOpenAIResponseClient(model).AsIChatClient(), instructions: "You are a helpful assistant.", name: "ConversationAgent");
+
+ClientResult createConversationResult = await conversationClient.CreateConversationAsync(BinaryContent.Create(BinaryData.FromString("{}")));
+
+using JsonDocument createConversationResultAsJson = JsonDocument.Parse(createConversationResult.GetRawResponse().Content.ToString());
+string conversationId = createConversationResultAsJson.RootElement.GetProperty("id"u8)!.GetString()!;
+
+// Create a thread for the conversation - this enables conversation state management for subsequent turns
+AgentThread thread = agent.GetNewThread(conversationId);
+
+Console.WriteLine("=== Multi-turn Conversation Demo ===\n");
+
+// First turn: Ask about a topic
+Console.WriteLine("User: What is the capital of France?");
+UserChatMessage firstMessage = new("What is the capital of France?");
+
+// After this call, the conversation state associated in the options is stored in 'thread' and used in subsequent calls
+ChatCompletion firstResponse = await agent.RunAsync([firstMessage], thread);
+Console.WriteLine($"Assistant: {firstResponse.Content.Last().Text}\n");
+
+// Second turn: Follow-up question that relies on conversation context
+Console.WriteLine("User: What famous landmarks are located there?");
+UserChatMessage secondMessage = new("What famous landmarks are located there?");
+
+ChatCompletion secondResponse = await agent.RunAsync([secondMessage], thread);
+Console.WriteLine($"Assistant: {secondResponse.Content.Last().Text}\n");
+
+// Third turn: Another follow-up that demonstrates context continuity
+Console.WriteLine("User: How tall is the most famous one?");
+UserChatMessage thirdMessage = new("How tall is the most famous one?");
+
+ChatCompletion thirdResponse = await agent.RunAsync([thirdMessage], thread);
+Console.WriteLine($"Assistant: {thirdResponse.Content.Last().Text}\n");
+
+Console.WriteLine("=== End of Conversation ===");
+
+// Show full conversation history
+Console.WriteLine("Full Conversation History:");
+ClientResult getConversationResult = await conversationClient.GetConversationAsync(conversationId);
+
+Console.WriteLine("Conversation created.");
+Console.WriteLine($" Conversation ID: {conversationId}");
+Console.WriteLine();
+
+CollectionResult getConversationItemsResults = conversationClient.GetConversationItems(conversationId);
+foreach (ClientResult result in getConversationItemsResults.GetRawPages())
+{
+ Console.WriteLine("Message contents retrieved. Order is most recent first by default.");
+ using JsonDocument getConversationItemsResultAsJson = JsonDocument.Parse(result.GetRawResponse().Content.ToString());
+ foreach (JsonElement element in getConversationItemsResultAsJson.RootElement.GetProperty("data").EnumerateArray())
+ {
+ string messageId = element.GetProperty("id"u8).ToString();
+ string messageRole = element.GetProperty("role"u8).ToString();
+ Console.WriteLine($" Message ID: {messageId}");
+ Console.WriteLine($" Message Role: {messageRole}");
+
+ foreach (var content in element.GetProperty("content").EnumerateArray())
+ {
+ string messageContentText = content.GetProperty("text"u8).ToString();
+ Console.WriteLine($" Message Text: {messageContentText}");
+ }
+ Console.WriteLine();
+ }
+}
+
+ClientResult deleteConversationResult = conversationClient.DeleteConversation(conversationId);
+using JsonDocument deleteConversationResultAsJson = JsonDocument.Parse(deleteConversationResult.GetRawResponse().Content.ToString());
+bool deleted = deleteConversationResultAsJson.RootElement
+ .GetProperty("deleted"u8)
+ .GetBoolean();
+
+Console.WriteLine("Conversation deleted.");
+Console.WriteLine($" Deleted: {deleted}");
+Console.WriteLine();
diff --git a/dotnet/samples/GettingStarted/AgentWithOpenAI/Agent_OpenAI_Step05_Conversation/README.md b/dotnet/samples/GettingStarted/AgentWithOpenAI/Agent_OpenAI_Step05_Conversation/README.md
new file mode 100644
index 0000000000..c279ba2c17
--- /dev/null
+++ b/dotnet/samples/GettingStarted/AgentWithOpenAI/Agent_OpenAI_Step05_Conversation/README.md
@@ -0,0 +1,90 @@
+# Managing Conversation State with OpenAI
+
+This sample demonstrates how to maintain conversation state across multiple turns using the Agent Framework with OpenAI's Conversation API.
+
+## What This Sample Shows
+
+- **Conversation State Management**: Shows how to use `ConversationClient` and `AgentThread` to maintain conversation context across multiple agent invocations
+- **Multi-turn Conversations**: Demonstrates follow-up questions that rely on context from previous messages in the conversation
+- **Server-Side Storage**: Uses OpenAI's Conversation API to manage conversation history server-side, allowing the model to access previous messages without resending them
+- **Conversation Lifecycle**: Demonstrates creating, retrieving, and deleting conversations
+
+## Key Concepts
+
+### ConversationClient for Server-Side Storage
+
+The `ConversationClient` manages conversations on OpenAI's servers:
+
+```csharp
+// Create a ConversationClient from OpenAIClient
+OpenAIClient openAIClient = new(apiKey);
+ConversationClient conversationClient = openAIClient.GetConversationClient();
+
+// Create a new conversation
+ClientResult createConversationResult = await conversationClient.CreateConversationAsync(BinaryContent.Create(BinaryData.FromString("{}")));
+```
+
+### AgentThread for Conversation State
+
+The `AgentThread` works with `ChatClientAgentRunOptions` to link the agent to a server-side conversation:
+
+```csharp
+// Set up agent run options with the conversation ID
+ChatClientAgentRunOptions agentRunOptions = new() { ChatOptions = new ChatOptions() { ConversationId = conversationId } };
+
+// Create a thread for the conversation
+AgentThread thread = agent.GetNewThread();
+
+// First call links the thread to the conversation
+ChatCompletion firstResponse = await agent.RunAsync([firstMessage], thread, agentRunOptions);
+
+// Subsequent calls use the thread without needing to pass options again
+ChatCompletion secondResponse = await agent.RunAsync([secondMessage], thread);
+```
+
+### Retrieving Conversation History
+
+You can retrieve the full conversation history from the server:
+
+```csharp
+CollectionResult getConversationItemsResults = conversationClient.GetConversationItems(conversationId);
+foreach (ClientResult result in getConversationItemsResults.GetRawPages())
+{
+ // Process conversation items
+}
+```
+
+### How It Works
+
+1. **Create an OpenAI Client**: Initialize an `OpenAIClient` with your API key
+2. **Create a Conversation**: Use `ConversationClient` to create a server-side conversation
+3. **Create an Agent**: Initialize an `OpenAIResponseClientAgent` with the desired model and instructions
+4. **Create a Thread**: Call `agent.GetNewThread()` to create a new conversation thread
+5. **Link Thread to Conversation**: Pass `ChatClientAgentRunOptions` with the `ConversationId` on the first call
+6. **Send Messages**: Subsequent calls to `agent.RunAsync()` only need the thread - context is maintained
+7. **Cleanup**: Delete the conversation when done using `conversationClient.DeleteConversation()`
+
+## Running the Sample
+
+1. Set the required environment variables:
+ ```powershell
+ $env:OPENAI_API_KEY = "your_api_key_here"
+ $env:OPENAI_MODEL = "gpt-4o-mini"
+ ```
+
+2. Run the sample:
+ ```powershell
+ dotnet run
+ ```
+
+## Expected Output
+
+The sample demonstrates a three-turn conversation where each follow-up question relies on context from previous messages:
+
+1. First question asks about the capital of France
+2. Second question asks about landmarks "there" - requiring understanding of the previous answer
+3. Third question asks about "the most famous one" - requiring context from both previous turns
+
+After the conversation, the sample retrieves and displays the full conversation history from the server, then cleans up by deleting the conversation.
+
+This demonstrates that the conversation state is properly maintained across multiple agent invocations using OpenAI's server-side conversation storage.
diff --git a/dotnet/samples/GettingStarted/AgentWithOpenAI/README.md b/dotnet/samples/GettingStarted/AgentWithOpenAI/README.md
index 6f2c77f39b..019af7f2b6 100644
--- a/dotnet/samples/GettingStarted/AgentWithOpenAI/README.md
+++ b/dotnet/samples/GettingStarted/AgentWithOpenAI/README.md
@@ -13,4 +13,5 @@ Agent Framework provides additional support to allow OpenAI developers to use th
|[Creating an AIAgent](./Agent_OpenAI_Step01_Running/)|This sample demonstrates how to create and run a basic agent with native OpenAI SDK types. Shows both regular and streaming invocation of the agent.|
|[Using Reasoning Capabilities](./Agent_OpenAI_Step02_Reasoning/)|This sample demonstrates how to create an AI agent with reasoning capabilities using OpenAI's reasoning models and response types.|
|[Creating an Agent from a ChatClient](./Agent_OpenAI_Step03_CreateFromChatClient/)|This sample demonstrates how to create an AI agent directly from an OpenAI.Chat.ChatClient instance using OpenAIChatClientAgent.|
-|[Creating an Agent from an OpenAIResponseClient](./Agent_OpenAI_Step04_CreateFromOpenAIResponseClient/)|This sample demonstrates how to create an AI agent directly from an OpenAI.Responses.OpenAIResponseClient instance using OpenAIResponseClientAgent.|
\ No newline at end of file
+|[Creating an Agent from an OpenAIResponseClient](./Agent_OpenAI_Step04_CreateFromOpenAIResponseClient/)|This sample demonstrates how to create an AI agent directly from an OpenAI.Responses.OpenAIResponseClient instance using OpenAIResponseClientAgent.|
+|[Managing Conversation State](./Agent_OpenAI_Step05_Conversation/)|This sample demonstrates how to maintain conversation state across multiple turns using the AgentThread for context continuity.|
\ No newline at end of file
diff --git a/dotnet/samples/HostedAgents/AgentWithHostedMCP/AgentWithHostedMCP.csproj b/dotnet/samples/HostedAgents/AgentWithHostedMCP/AgentWithHostedMCP.csproj
index 21146dd2dd..d2c0ea70f8 100644
--- a/dotnet/samples/HostedAgents/AgentWithHostedMCP/AgentWithHostedMCP.csproj
+++ b/dotnet/samples/HostedAgents/AgentWithHostedMCP/AgentWithHostedMCP.csproj
@@ -36,9 +36,9 @@
-
+
-
+
diff --git a/dotnet/samples/HostedAgents/AgentWithTextSearchRag/AgentWithTextSearchRag.csproj b/dotnet/samples/HostedAgents/AgentWithTextSearchRag/AgentWithTextSearchRag.csproj
index 118b92e074..e67846f54c 100644
--- a/dotnet/samples/HostedAgents/AgentWithTextSearchRag/AgentWithTextSearchRag.csproj
+++ b/dotnet/samples/HostedAgents/AgentWithTextSearchRag/AgentWithTextSearchRag.csproj
@@ -37,7 +37,7 @@
-
+
diff --git a/dotnet/samples/HostedAgents/AgentsInWorkflows/AgentsInWorkflows.csproj b/dotnet/samples/HostedAgents/AgentsInWorkflows/AgentsInWorkflows.csproj
index c0fca8a340..a865f43be5 100644
--- a/dotnet/samples/HostedAgents/AgentsInWorkflows/AgentsInWorkflows.csproj
+++ b/dotnet/samples/HostedAgents/AgentsInWorkflows/AgentsInWorkflows.csproj
@@ -37,7 +37,7 @@
-
+
diff --git a/dotnet/src/Microsoft.Agents.AI.A2A/A2AAgent.cs b/dotnet/src/Microsoft.Agents.AI.A2A/A2AAgent.cs
index bdb1f72928..e804fbb389 100644
--- a/dotnet/src/Microsoft.Agents.AI.A2A/A2AAgent.cs
+++ b/dotnet/src/Microsoft.Agents.AI.A2A/A2AAgent.cs
@@ -198,7 +198,7 @@ public override async IAsyncEnumerable RunStreamingAsync
}
///
- public override string Id => this._id ?? base.Id;
+ protected override string? IdCore => this._id;
///
public override string? Name => this._name ?? base.Name;
diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/AIAgent.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/AIAgent.cs
index eba6f84687..4cff385dcc 100644
--- a/dotnet/src/Microsoft.Agents.AI.Abstractions/AIAgent.cs
+++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/AIAgent.cs
@@ -22,9 +22,6 @@ namespace Microsoft.Agents.AI;
[DebuggerDisplay("{DisplayName,nq}")]
public abstract class AIAgent
{
- /// Default ID of this agent instance.
- private readonly string _id = Guid.NewGuid().ToString("N");
-
///
/// Gets the unique identifier for this agent instance.
///
@@ -37,7 +34,19 @@ public abstract class AIAgent
/// agent instances in multi-agent scenarios. They should remain stable for the lifetime
/// of the agent instance.
///
- public virtual string Id => this._id;
+ public string Id { get => this.IdCore ?? field; } = Guid.NewGuid().ToString("N");
+
+ ///
+ /// Gets a custom identifier for the agent, which can be overridden by derived classes.
+ ///
+ ///
+ /// A string representing the agent's identifier, or if the default ID should be used.
+ ///
+ ///
+ /// Derived classes can override this property to provide a custom identifier.
+ /// When is returned, the property will use the default randomly-generated identifier.
+ ///
+ protected virtual string? IdCore => null;
///
/// Gets the human-readable name of the agent.
@@ -61,7 +70,7 @@ public abstract class AIAgent
/// This property provides a guaranteed non-null string suitable for display in user interfaces,
/// logs, or other contexts where a readable identifier is needed.
///
- public virtual string DisplayName => this.Name ?? this.Id ?? this._id; // final fallback to _id in case Id override returns null
+ public virtual string DisplayName => this.Name ?? this.Id;
///
/// Gets a description of the agent's purpose, capabilities, or behavior.
diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/DelegatingAIAgent.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/DelegatingAIAgent.cs
index 353c82c996..72f1980b1c 100644
--- a/dotnet/src/Microsoft.Agents.AI.Abstractions/DelegatingAIAgent.cs
+++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/DelegatingAIAgent.cs
@@ -54,7 +54,7 @@ protected DelegatingAIAgent(AIAgent innerAgent)
protected AIAgent InnerAgent { get; }
///
- public override string Id => this.InnerAgent.Id;
+ protected override string? IdCore => this.InnerAgent.Id;
///
public override string? Name => this.InnerAgent.Name;
diff --git a/dotnet/src/Microsoft.Agents.AI.DevUI/DevUIMiddleware.cs b/dotnet/src/Microsoft.Agents.AI.DevUI/DevUIMiddleware.cs
index 2d6fcbd7e4..ac585ad39a 100644
--- a/dotnet/src/Microsoft.Agents.AI.DevUI/DevUIMiddleware.cs
+++ b/dotnet/src/Microsoft.Agents.AI.DevUI/DevUIMiddleware.cs
@@ -81,7 +81,7 @@ public async Task HandleRequestAsync(HttpContext context)
}
context.Response.StatusCode = StatusCodes.Status301MovedPermanently;
- context.Response.Headers.Location = redirectUrl;
+ context.Response.Headers.Location = redirectUrl; // CodeQL [SM04598] justification: The redirect URL is constructed from a server-configured base path (_basePath), not user input. The query string is only appended as parameters and cannot change the redirect destination since this is a relative URL.
if (this._logger.IsEnabled(LogLevel.Debug))
{
diff --git a/dotnet/src/Microsoft.Agents.AI.DurableTask/EntityAgentWrapper.cs b/dotnet/src/Microsoft.Agents.AI.DurableTask/EntityAgentWrapper.cs
index 34c9208967..8822ebcc39 100644
--- a/dotnet/src/Microsoft.Agents.AI.DurableTask/EntityAgentWrapper.cs
+++ b/dotnet/src/Microsoft.Agents.AI.DurableTask/EntityAgentWrapper.cs
@@ -19,7 +19,7 @@ internal sealed class EntityAgentWrapper(
private readonly IServiceProvider? _entityScopedServices = entityScopedServices;
// The ID of the agent is always the entity ID.
- public override string Id => this._entityContext.Id.ToString();
+ protected override string? IdCore => this._entityContext.Id.ToString();
public override async Task RunAsync(
IEnumerable messages,
diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowHostAgent.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowHostAgent.cs
index 98dc5903bf..70fcee15df 100644
--- a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowHostAgent.cs
+++ b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowHostAgent.cs
@@ -39,7 +39,7 @@ public WorkflowHostAgent(Workflow workflow, string? id = null, string? name = nu
this._describeTask = this._workflow.DescribeProtocolAsync().AsTask();
}
- public override string Id => this._id ?? base.Id;
+ protected override string? IdCore => this._id;
public override string? Name { get; }
public override string? Description { get; }
diff --git a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgent.cs b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgent.cs
index df7477241c..a5a34d24a9 100644
--- a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgent.cs
+++ b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgent.cs
@@ -121,7 +121,7 @@ public ChatClientAgent(IChatClient chatClient, ChatClientAgentOptions? options,
public IChatClient ChatClient { get; }
///
- public override string Id => this._agentOptions?.Id ?? base.Id;
+ protected override string? IdCore => this._agentOptions?.Id;
///
public override string? Name => this._agentOptions?.Name;
diff --git a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AIAgentTests.cs b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AIAgentTests.cs
index 5111a97ad1..e3bda2081a 100644
--- a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AIAgentTests.cs
+++ b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AIAgentTests.cs
@@ -214,13 +214,31 @@ public async Task InvokeStreamingWithSingleMessageCallsMockedInvokeWithMessageIn
[Fact]
public void ValidateAgentIDIsIdempotent()
{
+ // Arrange
var agent = new MockAgent();
+ // Act
string id = agent.Id;
+
+ // Assert
Assert.NotNull(id);
Assert.Equal(id, agent.Id);
}
+ [Fact]
+ public void ValidateAgentIDCanBeProvidedByDerivedAgentClass()
+ {
+ // Arrange
+ var agent = new MockAgent(id: "test-agent-id");
+
+ // Act
+ string id = agent.Id;
+
+ // Assert
+ Assert.NotNull(id);
+ Assert.Equal("test-agent-id", id);
+ }
+
#region GetService Method Tests
///
@@ -344,6 +362,13 @@ public abstract class TestAgentThread : AgentThread;
private sealed class MockAgent : AIAgent
{
+ public MockAgent(string? id = null)
+ {
+ this.IdCore = id;
+ }
+
+ protected override string? IdCore { get; }
+
public override AgentThread GetNewThread()
=> throw new NotImplementedException();
diff --git a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/DelegatingAIAgentTests.cs b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/DelegatingAIAgentTests.cs
index 4dca99a77c..50271b7eee 100644
--- a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/DelegatingAIAgentTests.cs
+++ b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/DelegatingAIAgentTests.cs
@@ -6,6 +6,7 @@
using System.Threading.Tasks;
using Microsoft.Extensions.AI;
using Moq;
+using Moq.Protected;
namespace Microsoft.Agents.AI.Abstractions.UnitTests;
@@ -31,7 +32,7 @@ public DelegatingAIAgentTests()
this._testThread = new TestAgentThread();
// Setup inner agent mock
- this._innerAgentMock.Setup(x => x.Id).Returns("test-agent-id");
+ this._innerAgentMock.Protected().SetupGet("IdCore").Returns("test-agent-id");
this._innerAgentMock.Setup(x => x.Name).Returns("Test Agent");
this._innerAgentMock.Setup(x => x.Description).Returns("Test Description");
this._innerAgentMock.Setup(x => x.GetNewThread()).Returns(this._testThread);
@@ -93,7 +94,7 @@ public void Id_DelegatesToInnerAgent()
// Assert
Assert.Equal("test-agent-id", id);
- this._innerAgentMock.Verify(x => x.Id, Times.Once);
+ this._innerAgentMock.Protected().VerifyGet("IdCore", Times.Once());
}
///
diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.IntegrationTests/BasicStreamingTests.cs b/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.IntegrationTests/BasicStreamingTests.cs
index 5bc4e8afad..69560421cf 100644
--- a/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.IntegrationTests/BasicStreamingTests.cs
+++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.IntegrationTests/BasicStreamingTests.cs
@@ -276,15 +276,9 @@ public async ValueTask DisposeAsync()
[SuppressMessage("Performance", "CA1812:Avoid uninstantiated internal classes", Justification = "Instantiated via dependency injection")]
internal sealed class FakeChatClientAgent : AIAgent
{
- public FakeChatClientAgent()
- {
- this.Id = "fake-agent";
- this.Description = "A fake agent for testing";
- }
-
- public override string Id { get; }
+ protected override string? IdCore => "fake-agent";
- public override string? Description { get; }
+ public override string? Description => "A fake agent for testing";
public override AgentThread GetNewThread()
{
@@ -350,15 +344,9 @@ public FakeInMemoryAgentThread(JsonElement serializedThread, JsonSerializerOptio
[SuppressMessage("Performance", "CA1812:Avoid uninstantiated internal classes", Justification = "Instantiated via dependency injection")]
internal sealed class FakeMultiMessageAgent : AIAgent
{
- public FakeMultiMessageAgent()
- {
- this.Id = "fake-multi-message-agent";
- this.Description = "A fake agent that sends multiple messages for testing";
- }
-
- public override string Id { get; }
+ protected override string? IdCore => "fake-multi-message-agent";
- public override string? Description { get; }
+ public override string? Description => "A fake agent that sends multiple messages for testing";
public override AgentThread GetNewThread()
{
diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.UnitTests/AGUIEndpointRouteBuilderExtensionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.UnitTests/AGUIEndpointRouteBuilderExtensionsTests.cs
index 78a3048747..3e80a58369 100644
--- a/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.UnitTests/AGUIEndpointRouteBuilderExtensionsTests.cs
+++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.UnitTests/AGUIEndpointRouteBuilderExtensionsTests.cs
@@ -421,7 +421,7 @@ private static List ParseSseEvents(string responseContent)
private sealed class MultiResponseAgent : AIAgent
{
- public override string Id => "multi-response-agent";
+ protected override string? IdCore => "multi-response-agent";
public override string? Description => "Agent that produces multiple text chunks";
@@ -510,7 +510,7 @@ public TestInMemoryAgentThread(JsonElement serializedThreadState, JsonSerializer
private sealed class TestAgent : AIAgent
{
- public override string Id => "test-agent";
+ protected override string? IdCore => "test-agent";
public override string? Description => "Test agent";
diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/06_GroupChat_Workflow.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/06_GroupChat_Workflow.cs
index a0e57006ed..16a51876d0 100644
--- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/06_GroupChat_Workflow.cs
+++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/06_GroupChat_Workflow.cs
@@ -57,7 +57,7 @@ internal sealed class HelloAgent(string id = nameof(HelloAgent)) : AIAgent
public const string Greeting = "Hello World!";
public const string DefaultId = nameof(HelloAgent);
- public override string Id => id;
+ protected override string? IdCore => id;
public override string? Name => id;
public override AgentThread GetNewThread()
diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/SpecializedExecutorSmokeTests.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/SpecializedExecutorSmokeTests.cs
index b93d7862d5..daff2c248e 100644
--- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/SpecializedExecutorSmokeTests.cs
+++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/SpecializedExecutorSmokeTests.cs
@@ -19,7 +19,7 @@ public class SpecializedExecutorSmokeTests
{
public class TestAIAgent(List? messages = null, string? id = null, string? name = null) : AIAgent
{
- public override string Id => id ?? base.Id;
+ protected override string? IdCore => id;
public override string? Name => name;
public static List ToChatMessages(params string[] messages)
diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/TestEchoAgent.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/TestEchoAgent.cs
index 4966585211..9ddc94cf71 100644
--- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/TestEchoAgent.cs
+++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/TestEchoAgent.cs
@@ -13,7 +13,7 @@ namespace Microsoft.Agents.AI.Workflows.UnitTests;
internal class TestEchoAgent(string? id = null, string? name = null, string? prefix = null) : AIAgent
{
- public override string Id => id ?? base.Id;
+ protected override string? IdCore => id;
public override string? Name => name ?? base.Name;
public override AgentThread DeserializeThread(JsonElement serializedThread, JsonSerializerOptions? jsonSerializerOptions = null)
diff --git a/python/CHANGELOG.md b/python/CHANGELOG.md
index 54171d870a..b9bb82d6e7 100644
--- a/python/CHANGELOG.md
+++ b/python/CHANGELOG.md
@@ -7,6 +7,25 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
+## [1.0.0b251211] - 2025-12-11
+
+### Added
+
+- **agent-framework-core**: Extend HITL support for all orchestration patterns (#2620)
+- **agent-framework-core**: Add factory pattern to concurrent orchestration builder (#2738)
+- **agent-framework-core**: Add factory pattern to sequential orchestration builder (#2710)
+- **agent-framework-azure-ai**: Capture file IDs from code interpreter in streaming responses (#2741)
+
+### Changed
+
+- **agent-framework-azurefunctions**: Change DurableAIAgent log level from warning to debug when invoked without thread (#2736)
+
+### Fixed
+
+- **agent-framework-core**: Added more complete parsing for mcp tool arguments (#2756)
+- **agent-framework-core**: Fix GroupChat ManagerSelectionResponse JSON Schema for OpenAI Structured Outputs (#2750)
+- **samples**: Standardize OpenAI API key environment variable naming (#2629)
+
## [1.0.0b251209] - 2025-12-09
### Added
@@ -347,7 +366,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
For more information, see the [announcement blog post](https://devblogs.microsoft.com/foundry/introducing-microsoft-agent-framework-the-open-source-engine-for-agentic-ai-apps/).
-[Unreleased]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251209...HEAD
+[Unreleased]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251211...HEAD
+[1.0.0b251211]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251209...python-1.0.0b251211
[1.0.0b251209]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251204...python-1.0.0b251209
[1.0.0b251204]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251120...python-1.0.0b251204
[1.0.0b251120]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251117...python-1.0.0b251120
diff --git a/python/packages/a2a/pyproject.toml b/python/packages/a2a/pyproject.toml
index 0233d66628..8ac7a0abbc 100644
--- a/python/packages/a2a/pyproject.toml
+++ b/python/packages/a2a/pyproject.toml
@@ -4,7 +4,7 @@ description = "A2A integration for Microsoft Agent Framework."
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}]
readme = "README.md"
requires-python = ">=3.10"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
license-files = ["LICENSE"]
urls.homepage = "https://aka.ms/agent-framework"
urls.source = "https://github.com/microsoft/agent-framework/tree/main/python"
diff --git a/python/packages/ag-ui/pyproject.toml b/python/packages/ag-ui/pyproject.toml
index 0a501b16be..805b2b55e4 100644
--- a/python/packages/ag-ui/pyproject.toml
+++ b/python/packages/ag-ui/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "agent-framework-ag-ui"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
description = "AG-UI protocol integration for Agent Framework"
readme = "README.md"
license-files = ["LICENSE"]
diff --git a/python/packages/anthropic/pyproject.toml b/python/packages/anthropic/pyproject.toml
index 272a612c0a..7cdc807541 100644
--- a/python/packages/anthropic/pyproject.toml
+++ b/python/packages/anthropic/pyproject.toml
@@ -4,7 +4,7 @@ description = "Anthropic integration for Microsoft Agent Framework."
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}]
readme = "README.md"
requires-python = ">=3.10"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
license-files = ["LICENSE"]
urls.homepage = "https://aka.ms/agent-framework"
urls.source = "https://github.com/microsoft/agent-framework/tree/main/python"
diff --git a/python/packages/azure-ai-search/pyproject.toml b/python/packages/azure-ai-search/pyproject.toml
index cda63bf6ca..ebb897df8c 100644
--- a/python/packages/azure-ai-search/pyproject.toml
+++ b/python/packages/azure-ai-search/pyproject.toml
@@ -4,7 +4,7 @@ description = "Azure AI Search integration for Microsoft Agent Framework."
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}]
readme = "README.md"
requires-python = ">=3.10"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
license-files = ["LICENSE"]
urls.homepage = "https://aka.ms/agent-framework"
urls.source = "https://github.com/microsoft/agent-framework/tree/main/python"
diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py b/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py
index b4c55f0e55..0ea9ee1f05 100644
--- a/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py
+++ b/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py
@@ -63,6 +63,8 @@
McpTool,
MessageDeltaChunk,
MessageDeltaTextContent,
+ MessageDeltaTextFileCitationAnnotation,
+ MessageDeltaTextFilePathAnnotation,
MessageDeltaTextUrlCitationAnnotation,
MessageImageUrlParam,
MessageInputContentBlock,
@@ -471,6 +473,45 @@ def _extract_url_citations(
return url_citations
+ def _extract_file_path_contents(self, message_delta_chunk: MessageDeltaChunk) -> list[HostedFileContent]:
+ """Extract file references from MessageDeltaChunk annotations.
+
+ Code interpreter generates files that are referenced via file path or file citation
+ annotations in the message content. This method extracts those file IDs and returns
+ them as HostedFileContent objects.
+
+ Handles two annotation types:
+ - MessageDeltaTextFilePathAnnotation: Contains file_path.file_id
+ - MessageDeltaTextFileCitationAnnotation: Contains file_citation.file_id
+
+ Args:
+ message_delta_chunk: The message delta chunk to process
+
+ Returns:
+ List of HostedFileContent objects for any files referenced in annotations
+ """
+ file_contents: list[HostedFileContent] = []
+
+ for content in message_delta_chunk.delta.content:
+ if isinstance(content, MessageDeltaTextContent) and content.text and content.text.annotations:
+ for annotation in content.text.annotations:
+ if isinstance(annotation, MessageDeltaTextFilePathAnnotation):
+ # Extract file_id from the file_path annotation
+ file_path = getattr(annotation, "file_path", None)
+ if file_path is not None:
+ file_id = getattr(file_path, "file_id", None)
+ if file_id:
+ file_contents.append(HostedFileContent(file_id=file_id))
+ elif isinstance(annotation, MessageDeltaTextFileCitationAnnotation):
+ # Extract file_id from the file_citation annotation
+ file_citation = getattr(annotation, "file_citation", None)
+ if file_citation is not None:
+ file_id = getattr(file_citation, "file_id", None)
+ if file_id:
+ file_contents.append(HostedFileContent(file_id=file_id))
+
+ return file_contents
+
def _get_real_url_from_citation_reference(
self, citation_url: str, azure_search_tool_calls: list[dict[str, Any]]
) -> str:
@@ -530,6 +571,9 @@ async def _process_stream(
# Extract URL citations from the delta chunk
url_citations = self._extract_url_citations(event_data, azure_search_tool_calls)
+ # Extract file path contents from code interpreter outputs
+ file_contents = self._extract_file_path_contents(event_data)
+
# Create contents with citations if any exist
citation_content: list[Contents] = []
if event_data.text or url_citations:
@@ -538,6 +582,9 @@ async def _process_stream(
text_content_obj.annotations = url_citations
citation_content.append(text_content_obj)
+ # Add file contents from file path annotations
+ citation_content.extend(file_contents)
+
yield ChatResponseUpdate(
role=role,
contents=citation_content if citation_content else None,
diff --git a/python/packages/azure-ai/pyproject.toml b/python/packages/azure-ai/pyproject.toml
index 4d85dffdab..c20a28dc2e 100644
--- a/python/packages/azure-ai/pyproject.toml
+++ b/python/packages/azure-ai/pyproject.toml
@@ -4,7 +4,7 @@ description = "Azure AI Foundry integration for Microsoft Agent Framework."
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}]
readme = "README.md"
requires-python = ">=3.10"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
license-files = ["LICENSE"]
urls.homepage = "https://aka.ms/agent-framework"
urls.source = "https://github.com/microsoft/agent-framework/tree/main/python"
diff --git a/python/packages/azure-ai/tests/test_azure_ai_agent_client.py b/python/packages/azure-ai/tests/test_azure_ai_agent_client.py
index 80b2cebdb3..f1b4dafb63 100644
--- a/python/packages/azure-ai/tests/test_azure_ai_agent_client.py
+++ b/python/packages/azure-ai/tests/test_azure_ai_agent_client.py
@@ -24,6 +24,7 @@
FunctionCallContent,
FunctionResultContent,
HostedCodeInterpreterTool,
+ HostedFileContent,
HostedFileSearchTool,
HostedMCPTool,
HostedVectorStoreContent,
@@ -42,6 +43,8 @@
FileInfo,
MessageDeltaChunk,
MessageDeltaTextContent,
+ MessageDeltaTextFileCitationAnnotation,
+ MessageDeltaTextFilePathAnnotation,
MessageDeltaTextUrlCitationAnnotation,
RequiredFunctionToolCall,
RequiredMcpToolCall,
@@ -1362,6 +1365,108 @@ def test_azure_ai_chat_client_extract_url_citations_with_citations(mock_agents_c
assert citation.annotated_regions[0].end_index == 20
+def test_azure_ai_chat_client_extract_file_path_contents_with_file_path_annotation(
+ mock_agents_client: MagicMock,
+) -> None:
+ """Test _extract_file_path_contents with MessageDeltaChunk containing file path annotation."""
+ chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent")
+
+ # Create mock file_path annotation
+ mock_file_path = MagicMock()
+ mock_file_path.file_id = "assistant-test-file-123"
+
+ mock_annotation = MagicMock(spec=MessageDeltaTextFilePathAnnotation)
+ mock_annotation.file_path = mock_file_path
+
+ # Create mock text content with annotations
+ mock_text = MagicMock()
+ mock_text.annotations = [mock_annotation]
+
+ mock_text_content = MagicMock(spec=MessageDeltaTextContent)
+ mock_text_content.text = mock_text
+
+ # Create mock delta
+ mock_delta = MagicMock()
+ mock_delta.content = [mock_text_content]
+
+ # Create mock MessageDeltaChunk
+ mock_chunk = MagicMock(spec=MessageDeltaChunk)
+ mock_chunk.delta = mock_delta
+
+ # Call the method
+ file_contents = chat_client._extract_file_path_contents(mock_chunk)
+
+ # Verify results
+ assert len(file_contents) == 1
+ assert isinstance(file_contents[0], HostedFileContent)
+ assert file_contents[0].file_id == "assistant-test-file-123"
+
+
+def test_azure_ai_chat_client_extract_file_path_contents_with_file_citation_annotation(
+ mock_agents_client: MagicMock,
+) -> None:
+ """Test _extract_file_path_contents with MessageDeltaChunk containing file citation annotation."""
+ chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent")
+
+ # Create mock file_citation annotation
+ mock_file_citation = MagicMock()
+ mock_file_citation.file_id = "cfile_test-citation-456"
+
+ mock_annotation = MagicMock(spec=MessageDeltaTextFileCitationAnnotation)
+ mock_annotation.file_citation = mock_file_citation
+
+ # Create mock text content with annotations
+ mock_text = MagicMock()
+ mock_text.annotations = [mock_annotation]
+
+ mock_text_content = MagicMock(spec=MessageDeltaTextContent)
+ mock_text_content.text = mock_text
+
+ # Create mock delta
+ mock_delta = MagicMock()
+ mock_delta.content = [mock_text_content]
+
+ # Create mock MessageDeltaChunk
+ mock_chunk = MagicMock(spec=MessageDeltaChunk)
+ mock_chunk.delta = mock_delta
+
+ # Call the method
+ file_contents = chat_client._extract_file_path_contents(mock_chunk)
+
+ # Verify results
+ assert len(file_contents) == 1
+ assert isinstance(file_contents[0], HostedFileContent)
+ assert file_contents[0].file_id == "cfile_test-citation-456"
+
+
+def test_azure_ai_chat_client_extract_file_path_contents_empty_annotations(
+ mock_agents_client: MagicMock,
+) -> None:
+ """Test _extract_file_path_contents with no annotations returns empty list."""
+ chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent")
+
+ # Create mock text content with no annotations
+ mock_text = MagicMock()
+ mock_text.annotations = []
+
+ mock_text_content = MagicMock(spec=MessageDeltaTextContent)
+ mock_text_content.text = mock_text
+
+ # Create mock delta
+ mock_delta = MagicMock()
+ mock_delta.content = [mock_text_content]
+
+ # Create mock MessageDeltaChunk
+ mock_chunk = MagicMock(spec=MessageDeltaChunk)
+ mock_chunk.delta = mock_delta
+
+ # Call the method
+ file_contents = chat_client._extract_file_path_contents(mock_chunk)
+
+ # Verify results
+ assert len(file_contents) == 0
+
+
def get_weather(
location: Annotated[str, Field(description="The location to get the weather for.")],
) -> str:
diff --git a/python/packages/azurefunctions/pyproject.toml b/python/packages/azurefunctions/pyproject.toml
index a7b4e0458f..e6363a65f8 100644
--- a/python/packages/azurefunctions/pyproject.toml
+++ b/python/packages/azurefunctions/pyproject.toml
@@ -4,7 +4,7 @@ description = "Azure Functions integration for Microsoft Agent Framework."
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}]
readme = "README.md"
requires-python = ">=3.10"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
license-files = ["LICENSE"]
urls.homepage = "https://aka.ms/agent-framework"
urls.source = "https://github.com/microsoft/agent-framework/tree/main/python"
diff --git a/python/packages/chatkit/pyproject.toml b/python/packages/chatkit/pyproject.toml
index 9bb72d7500..6cd6a16146 100644
--- a/python/packages/chatkit/pyproject.toml
+++ b/python/packages/chatkit/pyproject.toml
@@ -4,7 +4,7 @@ description = "OpenAI ChatKit integration for Microsoft Agent Framework."
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}]
readme = "README.md"
requires-python = ">=3.10"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
license-files = ["LICENSE"]
urls.homepage = "https://aka.ms/agent-framework"
urls.source = "https://github.com/microsoft/agent-framework/tree/main/python"
diff --git a/python/packages/copilotstudio/pyproject.toml b/python/packages/copilotstudio/pyproject.toml
index 4301111482..955fb2cbcd 100644
--- a/python/packages/copilotstudio/pyproject.toml
+++ b/python/packages/copilotstudio/pyproject.toml
@@ -4,7 +4,7 @@ description = "Copilot Studio integration for Microsoft Agent Framework."
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}]
readme = "README.md"
requires-python = ">=3.10"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
license-files = ["LICENSE"]
urls.homepage = "https://aka.ms/agent-framework"
urls.source = "https://github.com/microsoft/agent-framework/tree/main/python"
diff --git a/python/packages/core/agent_framework/_mcp.py b/python/packages/core/agent_framework/_mcp.py
index b4caaea4f5..721af6210c 100644
--- a/python/packages/core/agent_framework/_mcp.py
+++ b/python/packages/core/agent_framework/_mcp.py
@@ -1,6 +1,5 @@
# Copyright (c) Microsoft. All rights reserved.
-import json
import logging
import re
import sys
@@ -19,9 +18,9 @@
from mcp.shared.context import RequestContext
from mcp.shared.exceptions import McpError
from mcp.shared.session import RequestResponder
-from pydantic import BaseModel, Field, create_model
+from pydantic import BaseModel, create_model
-from ._tools import AIFunction, HostedMCPSpecificApproval
+from ._tools import AIFunction, HostedMCPSpecificApproval, _build_pydantic_model_from_json_schema
from ._types import (
ChatMessage,
Contents,
@@ -274,95 +273,26 @@ def _get_input_model_from_mcp_prompt(prompt: types.Prompt) -> type[BaseModel]:
if not prompt.arguments:
return create_model(f"{prompt.name}_input")
- field_definitions: dict[str, Any] = {}
- for prompt_argument in prompt.arguments:
- # For prompts, all arguments are typically required and string type
- # unless specified otherwise in the prompt argument
- python_type = str # Default type for prompt arguments
+ # Convert prompt arguments to JSON schema format
+ properties: dict[str, Any] = {}
+ required: list[str] = []
- # Create field definition for create_model
+ for prompt_argument in prompt.arguments:
+ # For prompts, all arguments are typically string type unless specified otherwise
+ properties[prompt_argument.name] = {
+ "type": "string",
+ "description": prompt_argument.description if hasattr(prompt_argument, "description") else "",
+ }
if prompt_argument.required:
- field_definitions[prompt_argument.name] = (python_type, ...)
- else:
- field_definitions[prompt_argument.name] = (python_type, None)
+ required.append(prompt_argument.name)
- return create_model(f"{prompt.name}_input", **field_definitions)
+ schema = {"properties": properties, "required": required}
+ return _build_pydantic_model_from_json_schema(prompt.name, schema)
def _get_input_model_from_mcp_tool(tool: types.Tool) -> type[BaseModel]:
"""Creates a Pydantic model from a tools parameters."""
- properties = tool.inputSchema.get("properties", None)
- required = tool.inputSchema.get("required", [])
- definitions = tool.inputSchema.get("$defs", {})
-
- # Check if 'properties' is missing or not a dictionary
- if not properties:
- return create_model(f"{tool.name}_input")
-
- def resolve_type(prop_details: dict[str, Any]) -> type:
- """Resolve JSON Schema type to Python type, handling $ref."""
- # Handle $ref by resolving the reference
- if "$ref" in prop_details:
- ref = prop_details["$ref"]
- # Extract the reference path (e.g., "#/$defs/CustomerIdParam" -> "CustomerIdParam")
- if ref.startswith("#/$defs/"):
- def_name = ref.split("/")[-1]
- if def_name in definitions:
- # Resolve the reference and use its type
- resolved = definitions[def_name]
- return resolve_type(resolved)
- # If we can't resolve the ref, default to dict for safety
- return dict
-
- # Map JSON Schema types to Python types
- json_type = prop_details.get("type", "string")
- match json_type:
- case "integer":
- return int
- case "number":
- return float
- case "boolean":
- return bool
- case "array":
- return list
- case "object":
- return dict
- case _:
- return str # default
-
- field_definitions: dict[str, Any] = {}
- for prop_name, prop_details in properties.items():
- prop_details = json.loads(prop_details) if isinstance(prop_details, str) else prop_details
-
- python_type = resolve_type(prop_details)
- description = prop_details.get("description", "")
-
- # Build field kwargs (description, array items schema, etc.)
- field_kwargs: dict[str, Any] = {}
- if description:
- field_kwargs["description"] = description
-
- # Preserve array items schema if present
- if prop_details.get("type") == "array" and "items" in prop_details:
- items_schema = prop_details["items"]
- if items_schema and items_schema != {}:
- field_kwargs["json_schema_extra"] = {"items": items_schema}
-
- # Create field definition for create_model
- if prop_name in required:
- if field_kwargs:
- field_definitions[prop_name] = (python_type, Field(**field_kwargs))
- else:
- field_definitions[prop_name] = (python_type, ...)
- else:
- default_value = prop_details.get("default", None)
- field_kwargs["default"] = default_value
- if field_kwargs and any(k != "default" for k in field_kwargs):
- field_definitions[prop_name] = (python_type, Field(**field_kwargs))
- else:
- field_definitions[prop_name] = (python_type, default_value)
-
- return create_model(f"{tool.name}_input", **field_definitions)
+ return _build_pydantic_model_from_json_schema(tool.name, tool.inputSchema)
def _normalize_mcp_name(name: str) -> str:
diff --git a/python/packages/core/agent_framework/_tools.py b/python/packages/core/agent_framework/_tools.py
index 3657a994e2..bbbf07ab7a 100644
--- a/python/packages/core/agent_framework/_tools.py
+++ b/python/packages/core/agent_framework/_tools.py
@@ -25,7 +25,6 @@
from opentelemetry.metrics import Histogram
from pydantic import AnyUrl, BaseModel, Field, ValidationError, create_model
-from pydantic.fields import FieldInfo
from ._logging import get_logger
from ._serialization import SerializationMixin
@@ -932,6 +931,151 @@ def _create_input_model_from_func(func: Callable[..., Any], name: str) -> type[B
}
+def _build_pydantic_model_from_json_schema(
+ model_name: str,
+ schema: Mapping[str, Any],
+) -> type[BaseModel]:
+ """Creates a Pydantic model from JSON Schema with support for $refs, nested objects, and typed arrays.
+
+ Args:
+ model_name: The name of the model to be created.
+ schema: The JSON Schema definition (should contain 'properties', 'required', '$defs', etc.).
+
+ Returns:
+ The dynamically created Pydantic model class.
+ """
+ properties = schema.get("properties")
+ required = schema.get("required", [])
+ definitions = schema.get("$defs", {})
+
+ # Check if 'properties' is missing or not a dictionary
+ if not properties:
+ return create_model(f"{model_name}_input")
+
+ def _resolve_type(prop_details: dict[str, Any], parent_name: str = "") -> type:
+ """Resolve JSON Schema type to Python type, handling $ref, nested objects, and typed arrays.
+
+ Args:
+ prop_details: The JSON Schema property details
+ parent_name: Name to use for creating nested models (for uniqueness)
+
+ Returns:
+ Python type annotation (could be int, str, list[str], or a nested Pydantic model)
+ """
+ # Handle $ref by resolving the reference
+ if "$ref" in prop_details:
+ ref = prop_details["$ref"]
+ # Extract the reference path (e.g., "#/$defs/CustomerIdParam" -> "CustomerIdParam")
+ if ref.startswith("#/$defs/"):
+ def_name = ref.split("/")[-1]
+ if def_name in definitions:
+ # Resolve the reference and use its type
+ resolved = definitions[def_name]
+ return _resolve_type(resolved, def_name)
+ # If we can't resolve the ref, default to dict for safety
+ return dict
+
+ # Map JSON Schema types to Python types
+ json_type = prop_details.get("type", "string")
+ match json_type:
+ case "integer":
+ return int
+ case "number":
+ return float
+ case "boolean":
+ return bool
+ case "array":
+ # Handle typed arrays
+ items_schema = prop_details.get("items")
+ if items_schema and isinstance(items_schema, dict):
+ # Recursively resolve the item type
+ item_type = _resolve_type(items_schema, f"{parent_name}_item")
+ # Return list[ItemType] instead of bare list
+ return list[item_type] # type: ignore
+ # If no items schema or invalid, return bare list
+ return list
+ case "object":
+ # Handle nested objects by creating a nested Pydantic model
+ nested_properties = prop_details.get("properties")
+ nested_required = prop_details.get("required", [])
+
+ if nested_properties and isinstance(nested_properties, dict):
+ # Create the name for the nested model
+ nested_model_name = f"{parent_name}_nested" if parent_name else "NestedModel"
+
+ # Recursively build field definitions for the nested model
+ nested_field_definitions: dict[str, Any] = {}
+ for nested_prop_name, nested_prop_details in nested_properties.items():
+ nested_prop_details = (
+ json.loads(nested_prop_details)
+ if isinstance(nested_prop_details, str)
+ else nested_prop_details
+ )
+
+ nested_python_type = _resolve_type(
+ nested_prop_details, f"{nested_model_name}_{nested_prop_name}"
+ )
+ nested_description = nested_prop_details.get("description", "")
+
+ # Build field kwargs for nested property
+ nested_field_kwargs: dict[str, Any] = {}
+ if nested_description:
+ nested_field_kwargs["description"] = nested_description
+
+ # Create field definition
+ if nested_prop_name in nested_required:
+ nested_field_definitions[nested_prop_name] = (
+ (
+ nested_python_type,
+ Field(**nested_field_kwargs),
+ )
+ if nested_field_kwargs
+ else (nested_python_type, ...)
+ )
+ else:
+ nested_field_kwargs["default"] = nested_prop_details.get("default", None)
+ nested_field_definitions[nested_prop_name] = (
+ nested_python_type,
+ Field(**nested_field_kwargs),
+ )
+
+ # Create and return the nested Pydantic model
+ return create_model(nested_model_name, **nested_field_definitions) # type: ignore
+
+ # If no properties defined, return bare dict
+ return dict
+ case _:
+ return str # default
+
+ field_definitions: dict[str, Any] = {}
+ for prop_name, prop_details in properties.items():
+ prop_details = json.loads(prop_details) if isinstance(prop_details, str) else prop_details
+
+ python_type = _resolve_type(prop_details, f"{model_name}_{prop_name}")
+ description = prop_details.get("description", "")
+
+ # Build field kwargs (description, etc.)
+ field_kwargs: dict[str, Any] = {}
+ if description:
+ field_kwargs["description"] = description
+
+ # Create field definition for create_model
+ if prop_name in required:
+ if field_kwargs:
+ field_definitions[prop_name] = (python_type, Field(**field_kwargs))
+ else:
+ field_definitions[prop_name] = (python_type, ...)
+ else:
+ default_value = prop_details.get("default", None)
+ field_kwargs["default"] = default_value
+ if field_kwargs and any(k != "default" for k in field_kwargs):
+ field_definitions[prop_name] = (python_type, Field(**field_kwargs))
+ else:
+ field_definitions[prop_name] = (python_type, default_value)
+
+ return create_model(f"{model_name}_input", **field_definitions)
+
+
def _create_model_from_json_schema(tool_name: str, schema_json: Mapping[str, Any]) -> type[BaseModel]:
"""Creates a Pydantic model from a given JSON Schema.
@@ -948,29 +1092,8 @@ def _create_model_from_json_schema(tool_name: str, schema_json: Mapping[str, Any
f"JSON schema for tool '{tool_name}' must contain a 'properties' key of type dict. "
f"Got: {schema_json.get('properties', None)}"
)
- # Extract field definitions with type annotations
- field_definitions: dict[str, tuple[type, FieldInfo]] = {}
- for field_name, field_schema in schema_json["properties"].items():
- field_args: dict[str, Any] = {}
- if (field_description := field_schema.get("description", None)) is not None:
- field_args["description"] = field_description
- if (field_default := field_schema.get("default", None)) is not None:
- field_args["default"] = field_default
- field_type = field_schema.get("type", None)
- if field_type is None:
- raise ValueError(
- f"Missing 'type' for field '{field_name}' in JSON schema. "
- f"Got: {field_schema}, Supported types: {list(TYPE_MAPPING.keys())}"
- )
- python_type = TYPE_MAPPING.get(field_type)
- if python_type is None:
- raise ValueError(
- f"Unsupported type '{field_type}' for field '{field_name}' in JSON schema. "
- f"Got: {field_schema}, Supported types: {list(TYPE_MAPPING.keys())}"
- )
- field_definitions[field_name] = (python_type, Field(**field_args))
- return create_model(f"{tool_name}_input", **field_definitions) # type: ignore[call-overload, no-any-return]
+ return _build_pydantic_model_from_json_schema(tool_name, schema_json)
@overload
diff --git a/python/packages/core/agent_framework/_workflows/_concurrent.py b/python/packages/core/agent_framework/_workflows/_concurrent.py
index f6a7b09e60..a6fcaa1a3e 100644
--- a/python/packages/core/agent_framework/_workflows/_concurrent.py
+++ b/python/packages/core/agent_framework/_workflows/_concurrent.py
@@ -3,6 +3,7 @@
import asyncio
import inspect
import logging
+import uuid
from collections.abc import Callable, Sequence
from typing import Any
@@ -189,8 +190,11 @@ class ConcurrentBuilder:
r"""High-level builder for concurrent agent workflows.
- `participants([...])` accepts a list of AgentProtocol (recommended) or Executor.
+ - `register_participants([...])` accepts a list of factories for AgentProtocol (recommended)
+ or Executor factories
- `build()` wires: dispatcher -> fan-out -> participants -> fan-in -> aggregator.
- - `with_custom_aggregator(...)` overrides the default aggregator with an Executor or callback.
+ - `with_aggregator(...)` overrides the default aggregator with an Executor or callback.
+ - `register_aggregator(...)` accepts a factory for an Executor as custom aggregator.
Usage:
@@ -201,14 +205,33 @@ class ConcurrentBuilder:
# Minimal: use default aggregator (returns list[ChatMessage])
workflow = ConcurrentBuilder().participants([agent1, agent2, agent3]).build()
+ # With agent factories
+ workflow = ConcurrentBuilder().register_participants([create_agent1, create_agent2, create_agent3]).build()
+
# Custom aggregator via callback (sync or async). The callback receives
# list[AgentExecutorResponse] and its return value becomes the workflow's output.
- def summarize(results):
+ def summarize(results: list[AgentExecutorResponse]) -> str:
return " | ".join(r.agent_run_response.messages[-1].text for r in results)
- workflow = ConcurrentBuilder().participants([agent1, agent2, agent3]).with_custom_aggregator(summarize).build()
+ workflow = ConcurrentBuilder().participants([agent1, agent2, agent3]).with_aggregator(summarize).build()
+
+
+ # Custom aggregator via a factory
+ class MyAggregator(Executor):
+ @handler
+ async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowContext[Never, str]) -> None:
+ await ctx.yield_output(" | ".join(r.agent_run_response.messages[-1].text for r in results))
+
+
+ workflow = (
+ ConcurrentBuilder()
+ .register_participants([create_agent1, create_agent2, create_agent3])
+ .register_aggregator(lambda: MyAggregator(id="my_aggregator"))
+ .build()
+ )
+
# Enable checkpoint persistence so runs can resume
workflow = ConcurrentBuilder().participants([agent1, agent2, agent3]).with_checkpointing(storage).build()
@@ -219,10 +242,67 @@ def summarize(results):
def __init__(self) -> None:
self._participants: list[AgentProtocol | Executor] = []
+ self._participant_factories: list[Callable[[], AgentProtocol | Executor]] = []
self._aggregator: Executor | None = None
+ self._aggregator_factory: Callable[[], Executor] | None = None
self._checkpoint_storage: CheckpointStorage | None = None
self._request_info_enabled: bool = False
+ def register_participants(
+ self,
+ participant_factories: Sequence[Callable[[], AgentProtocol | Executor]],
+ ) -> "ConcurrentBuilder":
+ r"""Define the parallel participants for this concurrent workflow.
+
+ Accepts factories (callables) that return AgentProtocol instances (e.g., created
+ by a chat client) or Executor instances. Each participant created by a factory
+ is wired as a parallel branch using fan-out edges from an internal dispatcher.
+
+ Args:
+ participant_factories: Sequence of callables returning AgentProtocol or Executor instances
+
+ Raises:
+ ValueError: if `participant_factories` is empty or `.participants()`
+ or `.register_participants()` were already called
+
+ Example:
+
+ .. code-block:: python
+
+ def create_researcher() -> ChatAgent:
+ return ...
+
+
+ def create_marketer() -> ChatAgent:
+ return ...
+
+
+ def create_legal() -> ChatAgent:
+ return ...
+
+
+ class MyCustomExecutor(Executor): ...
+
+
+ wf = ConcurrentBuilder().register_participants([create_researcher, create_marketer, create_legal]).build()
+
+ # Mixing agent(s) and executor(s) is supported
+ wf2 = ConcurrentBuilder().register_participants([create_researcher, MyCustomExecutor]).build()
+ """
+ if self._participants:
+ raise ValueError(
+ "Cannot mix .participants([...]) and .register_participants() in the same builder instance."
+ )
+
+ if self._participant_factories:
+ raise ValueError("register_participants() has already been called on this builder instance.")
+
+ if not participant_factories:
+ raise ValueError("participant_factories cannot be empty")
+
+ self._participant_factories = list(participant_factories)
+ return self
+
def participants(self, participants: Sequence[AgentProtocol | Executor]) -> "ConcurrentBuilder":
r"""Define the parallel participants for this concurrent workflow.
@@ -230,8 +310,12 @@ def participants(self, participants: Sequence[AgentProtocol | Executor]) -> "Con
instances. Each participant is wired as a parallel branch using fan-out edges
from an internal dispatcher.
+ Args:
+ participants: Sequence of AgentProtocol or Executor instances
+
Raises:
- ValueError: if `participants` is empty or contains duplicates
+ ValueError: if `participants` is empty, contains duplicates, or `.register_participants()`
+ or `.participants()` were already called
TypeError: if any entry is not AgentProtocol or Executor
Example:
@@ -243,6 +327,14 @@ def participants(self, participants: Sequence[AgentProtocol | Executor]) -> "Con
# Mixing agent(s) and executor(s) is supported
wf2 = ConcurrentBuilder().participants([researcher_agent, my_custom_executor]).build()
"""
+ if self._participant_factories:
+ raise ValueError(
+ "Cannot mix .participants([...]) and .register_participants() in the same builder instance."
+ )
+
+ if self._participants:
+ raise ValueError("participants() has already been called on this builder instance.")
+
if not participants:
raise ValueError("participants cannot be empty")
@@ -265,38 +357,107 @@ def participants(self, participants: Sequence[AgentProtocol | Executor]) -> "Con
self._participants = list(participants)
return self
- def with_aggregator(self, aggregator: Executor | Callable[..., Any]) -> "ConcurrentBuilder":
- r"""Override the default aggregator with an Executor or a callback.
+ def register_aggregator(self, aggregator_factory: Callable[[], Executor]) -> "ConcurrentBuilder":
+ r"""Define a custom aggregator for this concurrent workflow.
+
+ Accepts a factory (callable) that returns an Executor instance. The executor
+ should handle `list[AgentExecutorResponse]` and yield output using `ctx.yield_output(...)`.
+
+ Args:
+ aggregator_factory: Callable that returns an Executor instance
+
+ Example:
+ .. code-block:: python
+
+ class MyCustomExecutor(Executor): ...
+
- - Executor: must handle `list[AgentExecutorResponse]` and
- yield output using `ctx.yield_output(...)` and add a
- output and the workflow becomes idle.
+ wf = (
+ ConcurrentBuilder()
+ .register_participants([create_researcher, create_marketer, create_legal])
+ .register_aggregator(lambda: MyCustomExecutor(id="my_aggregator"))
+ .build()
+ )
+ """
+ if self._aggregator is not None:
+ raise ValueError(
+ "Cannot mix .with_aggregator(...) and .register_aggregator(...) in the same builder instance."
+ )
+
+ if self._aggregator_factory is not None:
+ raise ValueError("register_aggregator() has already been called on this builder instance.")
+
+ self._aggregator_factory = aggregator_factory
+ return self
+
+ def with_aggregator(
+ self,
+ aggregator: Executor
+ | Callable[[list[AgentExecutorResponse]], Any]
+ | Callable[[list[AgentExecutorResponse], WorkflowContext[Never, Any]], Any],
+ ) -> "ConcurrentBuilder":
+    r"""Override the default aggregator with an executor instance or a callback.
+
+ - Executor: must handle `list[AgentExecutorResponse]` and yield output using `ctx.yield_output(...)`
- Callback: sync or async callable with one of the signatures:
`(results: list[AgentExecutorResponse]) -> Any | None` or
`(results: list[AgentExecutorResponse], ctx: WorkflowContext) -> Any | None`.
If the callback returns a non-None value, it becomes the workflow's output.
+ Args:
+        aggregator: Executor instance or a callback function
+
Example:
.. code-block:: python
+ # Executor-based aggregator
+ class CustomAggregator(Executor):
+ @handler
+ async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowContext) -> None:
+ await ctx.yield_output(" | ".join(r.agent_run_response.messages[-1].text for r in results))
+
+
+ wf = ConcurrentBuilder().participants([a1, a2, a3]).with_aggregator(CustomAggregator()).build()
+
# Callback-based aggregator (string result)
- async def summarize(results):
+ async def summarize(results: list[AgentExecutorResponse]) -> str:
return " | ".join(r.agent_run_response.messages[-1].text for r in results)
- wf = ConcurrentBuilder().participants([a1, a2, a3]).with_custom_aggregator(summarize).build()
+ wf = ConcurrentBuilder().participants([a1, a2, a3]).with_aggregator(summarize).build()
+
+
+ # Callback-based aggregator (yield result)
+ async def summarize(results: list[AgentExecutorResponse], ctx: WorkflowContext[Never, str]) -> None:
+ await ctx.yield_output(" | ".join(r.agent_run_response.messages[-1].text for r in results))
+
+
+ wf = ConcurrentBuilder().participants([a1, a2, a3]).with_aggregator(summarize).build()
"""
+ if self._aggregator_factory is not None:
+ raise ValueError(
+ "Cannot mix .with_aggregator(...) and .register_aggregator(...) in the same builder instance."
+ )
+
+ if self._aggregator is not None:
+ raise ValueError("with_aggregator() has already been called on this builder instance.")
+
if isinstance(aggregator, Executor):
self._aggregator = aggregator
elif callable(aggregator):
self._aggregator = _CallbackAggregator(aggregator)
else:
raise TypeError("aggregator must be an Executor or a callable")
+
return self
def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "ConcurrentBuilder":
- """Enable checkpoint persistence using the provided storage backend."""
+ """Enable checkpoint persistence using the provided storage backend.
+
+ Args:
+ checkpoint_storage: CheckpointStorage instance for persisting workflow state
+ """
self._checkpoint_storage = checkpoint_storage
return self
@@ -329,7 +490,7 @@ def build(self) -> Workflow:
before sending the outputs to the aggregator
- Aggregator yields output and the workflow becomes idle. The output is either:
- list[ChatMessage] (default aggregator: one user + one assistant per agent)
- - custom payload from the provided callback/executor
+ - custom payload from the provided aggregator
Returns:
Workflow: a ready-to-run workflow instance
@@ -343,26 +504,69 @@ def build(self) -> Workflow:
workflow = ConcurrentBuilder().participants([agent1, agent2]).build()
"""
- if not self._participants:
- raise ValueError("No participants provided. Call .participants([...]) first.")
+ if not self._participants and not self._participant_factories:
+ raise ValueError(
+ "No participants provided. Call .participants([...]) or .register_participants([...]) first."
+ )
+ # Internal nodes
dispatcher = _DispatchToAllParticipants(id="dispatcher")
- aggregator = self._aggregator or _AggregateAgentConversations(id="aggregator")
+ aggregator = (
+ self._aggregator
+ if self._aggregator is not None
+ else (
+ self._aggregator_factory()
+ if self._aggregator_factory is not None
+ else _AggregateAgentConversations(id="aggregator")
+ )
+ )
builder = WorkflowBuilder()
- builder.set_start_executor(dispatcher)
- builder.add_fan_out_edges(dispatcher, list(self._participants))
-
- if self._request_info_enabled:
- # Insert interceptor between fan-in and aggregator
- # participants -> fan-in -> interceptor -> aggregator
- request_info_interceptor = RequestInfoInterceptor(executor_id="request_info")
- builder.add_fan_in_edges(list(self._participants), request_info_interceptor)
- builder.add_edge(request_info_interceptor, aggregator)
+ if self._participant_factories:
+ # Register executors/agents to avoid warnings from the workflow builder
+ # if factories are provided instead of direct instances. This doesn't
+ # break the factory pattern since the concurrent builder still creates
+ # new instances per workflow build.
+ factory_names: list[str] = []
+ for factory in self._participant_factories:
+ factory_name = uuid.uuid4().hex
+ factory_names.append(factory_name)
+ instance = factory()
+ if isinstance(instance, Executor):
+ builder.register_executor(lambda executor=instance: executor, name=factory_name) # type: ignore[misc]
+ else:
+ builder.register_agent(lambda agent=instance: agent, name=factory_name) # type: ignore[misc]
+ # Register the dispatcher and the aggregator
+ builder.register_executor(lambda: dispatcher, name="dispatcher")
+ builder.register_executor(lambda: aggregator, name="aggregator")
+
+ builder.set_start_executor("dispatcher")
+ builder.add_fan_out_edges("dispatcher", factory_names)
+ if self._request_info_enabled:
+ # Insert interceptor between fan-in and aggregator
+ # participants -> fan-in -> interceptor -> aggregator
+ builder.register_executor(
+ lambda: RequestInfoInterceptor(executor_id="request_info"),
+ name="request_info_interceptor",
+ )
+ builder.add_fan_in_edges(factory_names, "request_info_interceptor")
+ builder.add_edge("request_info_interceptor", "aggregator")
+ else:
+ # Direct fan-in to aggregator
+ builder.add_fan_in_edges(factory_names, "aggregator")
else:
- # Direct fan-in to aggregator
- builder.add_fan_in_edges(list(self._participants), aggregator)
-
+ builder.set_start_executor(dispatcher)
+ builder.add_fan_out_edges(dispatcher, self._participants)
+
+ if self._request_info_enabled:
+ # Insert interceptor between fan-in and aggregator
+ # participants -> fan-in -> interceptor -> aggregator
+ request_info_interceptor = RequestInfoInterceptor(executor_id="request_info")
+ builder.add_fan_in_edges(self._participants, request_info_interceptor)
+ builder.add_edge(request_info_interceptor, aggregator)
+ else:
+ # Direct fan-in to aggregator
+ builder.add_fan_in_edges(self._participants, aggregator)
if self._checkpoint_storage is not None:
builder = builder.with_checkpointing(self._checkpoint_storage)
diff --git a/python/packages/core/agent_framework/_workflows/_group_chat.py b/python/packages/core/agent_framework/_workflows/_group_chat.py
index eab720a4b1..725a5c829c 100644
--- a/python/packages/core/agent_framework/_workflows/_group_chat.py
+++ b/python/packages/core/agent_framework/_workflows/_group_chat.py
@@ -132,7 +132,11 @@ class ManagerSelectionResponse(BaseModel):
final_message: Optional final message string when finishing conversation (will be converted to ChatMessage)
"""
- model_config = {"extra": "forbid"}
+ model_config = {
+ "extra": "forbid",
+ # OpenAI strict mode requires all properties to be in required array
+ "json_schema_extra": {"required": ["selected_participant", "instruction", "finish", "final_message"]},
+ }
selected_participant: str | None = None
instruction: str | None = None
diff --git a/python/packages/core/agent_framework/_workflows/_workflow_builder.py b/python/packages/core/agent_framework/_workflows/_workflow_builder.py
index 26cd0213e4..5bf36b6ccd 100644
--- a/python/packages/core/agent_framework/_workflows/_workflow_builder.py
+++ b/python/packages/core/agent_framework/_workflows/_workflow_builder.py
@@ -374,7 +374,7 @@ def register_agent(
)
"""
if name in self._executor_registry:
- raise ValueError(f"An executor factory with the name '{name}' is already registered.")
+ raise ValueError(f"An agent factory with the name '{name}' is already registered.")
def wrapped_factory() -> AgentExecutor:
agent = factory_func()
@@ -1148,21 +1148,29 @@ def _resolve_edge_registry(self) -> tuple[Executor, list[Executor], list[EdgeGro
if isinstance(self._start_executor, Executor):
start_executor = self._start_executor
- executors: dict[str, Executor] = {}
+ # Maps registered factory names to created executor instances for edge resolution
+ factory_name_to_instance: dict[str, Executor] = {}
+ # Maps executor IDs to created executor instances to prevent duplicates
+ executor_id_to_instance: dict[str, Executor] = {}
deferred_edge_groups: list[EdgeGroup] = []
for name, exec_factory in self._executor_registry.items():
instance = exec_factory()
+ if instance.id in executor_id_to_instance:
+ raise ValueError(f"Executor with ID '{instance.id}' has already been created.")
+ executor_id_to_instance[instance.id] = instance
+
if isinstance(self._start_executor, str) and name == self._start_executor:
start_executor = instance
+
# All executors will get their own internal edge group for receiving system messages
deferred_edge_groups.append(InternalEdgeGroup(instance.id)) # type: ignore[call-arg]
- executors[name] = instance
+ factory_name_to_instance[name] = instance
def _get_executor(name: str) -> Executor:
"""Helper to get executor by the registered name. Raises if not found."""
- if name not in executors:
- raise ValueError(f"Executor with name '{name}' has not been registered.")
- return executors[name]
+ if name not in factory_name_to_instance:
+ raise ValueError(f"Factory '{name}' has not been registered.")
+ return factory_name_to_instance[name]
for registration in self._edge_registry:
match registration:
@@ -1179,7 +1187,7 @@ def _get_executor(name: str) -> Executor:
cases_converted: list[SwitchCaseEdgeGroupCase | SwitchCaseEdgeGroupDefault] = []
for case in cases:
if not isinstance(case.target, str):
- raise ValueError("Switch case target must be a registered executor name (str) if deferred.")
+ raise ValueError("Switch case target must be a registered factory name (str) if deferred.")
target_exec = _get_executor(case.target)
if isinstance(case, Default):
cases_converted.append(SwitchCaseEdgeGroupDefault(target_id=target_exec.id))
@@ -1201,7 +1209,7 @@ def _get_executor(name: str) -> Executor:
if start_executor is None:
raise ValueError("Failed to resolve starting executor from registered factories.")
- return start_executor, list(executors.values()), deferred_edge_groups
+ return start_executor, list(executor_id_to_instance.values()), deferred_edge_groups
def build(self) -> Workflow:
"""Build and return the constructed workflow.
diff --git a/python/packages/core/agent_framework/openai/_responses_client.py b/python/packages/core/agent_framework/openai/_responses_client.py
index d1857fb4fe..a537884ba4 100644
--- a/python/packages/core/agent_framework/openai/_responses_client.py
+++ b/python/packages/core/agent_framework/openai/_responses_client.py
@@ -3,7 +3,7 @@
from collections.abc import AsyncIterable, Awaitable, Callable, Mapping, MutableMapping, MutableSequence, Sequence
from datetime import datetime, timezone
from itertools import chain
-from typing import Any, TypeVar
+from typing import Any, TypeVar, cast
from openai import AsyncOpenAI, BadRequestError
from openai.types.responses.file_search_tool_param import FileSearchToolParam
@@ -199,7 +199,7 @@ def _prepare_text_config(
return response_format, prepared_text
if isinstance(response_format, Mapping):
- format_config = self._convert_response_format(response_format)
+ format_config = self._convert_response_format(cast("Mapping[str, Any]", response_format))
if prepared_text is None:
prepared_text = {}
elif "format" in prepared_text and prepared_text["format"] != format_config:
@@ -212,20 +212,21 @@ def _prepare_text_config(
def _convert_response_format(self, response_format: Mapping[str, Any]) -> dict[str, Any]:
"""Convert Chat style response_format into Responses text format config."""
if "format" in response_format and isinstance(response_format["format"], Mapping):
- return dict(response_format["format"])
+ return dict(cast("Mapping[str, Any]", response_format["format"]))
format_type = response_format.get("type")
if format_type == "json_schema":
schema_section = response_format.get("json_schema", response_format)
if not isinstance(schema_section, Mapping):
raise ServiceInvalidRequestError("json_schema response_format must be a mapping.")
- schema = schema_section.get("schema")
+ schema_section_typed = cast("Mapping[str, Any]", schema_section)
+ schema: Any = schema_section_typed.get("schema")
if schema is None:
raise ServiceInvalidRequestError("json_schema response_format requires a schema.")
- name = (
- schema_section.get("name")
- or schema_section.get("title")
- or (schema.get("title") if isinstance(schema, Mapping) else None)
+ name: str = str(
+ schema_section_typed.get("name")
+ or schema_section_typed.get("title")
+ or (cast("Mapping[str, Any]", schema).get("title") if isinstance(schema, Mapping) else None)
or "response"
)
format_config: dict[str, Any] = {
@@ -532,12 +533,13 @@ def _openai_content_parser(
"text": content.text,
},
}
- if content.additional_properties is not None:
- if status := content.additional_properties.get("status"):
+ props: dict[str, Any] | None = getattr(content, "additional_properties", None)
+ if props:
+ if status := props.get("status"):
ret["status"] = status
- if reasoning_text := content.additional_properties.get("reasoning_text"):
+ if reasoning_text := props.get("reasoning_text"):
ret["content"] = {"type": "reasoning_text", "text": reasoning_text}
- if encrypted_content := content.additional_properties.get("encrypted_content"):
+ if encrypted_content := props.get("encrypted_content"):
ret["encrypted_content"] = encrypted_content
return ret
case DataContent() | UriContent():
@@ -824,7 +826,7 @@ def _create_response_content(
"raw_representation": response,
}
- conversation_id = self.get_conversation_id(response, chat_options.store)
+ conversation_id = self.get_conversation_id(response, chat_options.store) # type: ignore[reportArgumentType]
if conversation_id:
args["conversation_id"] = conversation_id
@@ -911,6 +913,8 @@ def _create_streaming_response_content(
metadata.update(self._get_metadata_from_response(event_part))
case "refusal":
contents.append(TextContent(text=event_part.refusal, raw_representation=event))
+ case _:
+ pass
case "response.output_text.delta":
contents.append(TextContent(text=event.delta, raw_representation=event))
metadata.update(self._get_metadata_from_response(event))
@@ -1032,6 +1036,60 @@ def _create_streaming_response_content(
raw_representation=event,
)
)
+ case "response.output_text.annotation.added":
+ # Handle streaming text annotations (file citations, file paths, etc.)
+ annotation: Any = event.annotation
+
+ def _get_ann_value(key: str) -> Any:
+ """Extract value from annotation (dict or object)."""
+ if isinstance(annotation, dict):
+ return cast("dict[str, Any]", annotation).get(key)
+ return getattr(annotation, key, None)
+
+ ann_type = _get_ann_value("type")
+ ann_file_id = _get_ann_value("file_id")
+ if ann_type == "file_path":
+ if ann_file_id:
+ contents.append(
+ HostedFileContent(
+ file_id=str(ann_file_id),
+ additional_properties={
+ "annotation_index": event.annotation_index,
+ "index": _get_ann_value("index"),
+ },
+ raw_representation=event,
+ )
+ )
+ elif ann_type == "file_citation":
+ if ann_file_id:
+ contents.append(
+ HostedFileContent(
+ file_id=str(ann_file_id),
+ additional_properties={
+ "annotation_index": event.annotation_index,
+ "filename": _get_ann_value("filename"),
+ "index": _get_ann_value("index"),
+ },
+ raw_representation=event,
+ )
+ )
+ elif ann_type == "container_file_citation":
+ if ann_file_id:
+ contents.append(
+ HostedFileContent(
+ file_id=str(ann_file_id),
+ additional_properties={
+ "annotation_index": event.annotation_index,
+ "container_id": _get_ann_value("container_id"),
+ "filename": _get_ann_value("filename"),
+ "start_index": _get_ann_value("start_index"),
+ "end_index": _get_ann_value("end_index"),
+ },
+ raw_representation=event,
+ )
+ )
+ else:
+ logger.debug("Unparsed annotation type in streaming: %s", ann_type)
case _:
logger.debug("Unparsed event of type: %s: %s", event.type, event)
diff --git a/python/packages/core/pyproject.toml b/python/packages/core/pyproject.toml
index aa8e2611f5..ba55b9498d 100644
--- a/python/packages/core/pyproject.toml
+++ b/python/packages/core/pyproject.toml
@@ -4,7 +4,7 @@ description = "Microsoft Agent Framework for building AI Agents with Python. Thi
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}]
readme = "README.md"
requires-python = ">=3.10"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
license-files = ["LICENSE"]
urls.homepage = "https://aka.ms/agent-framework"
urls.source = "https://github.com/microsoft/agent-framework/tree/main/python"
@@ -35,7 +35,7 @@ dependencies = [
# connectors and functions
"openai>=1.99.0",
"azure-identity>=1,<2",
- "mcp[ws]>=1.13",
+ "mcp[ws]>=1.23",
"packaging>=24.1",
]
diff --git a/python/packages/core/tests/core/test_mcp.py b/python/packages/core/tests/core/test_mcp.py
index 264ff1929b..813667bb7a 100644
--- a/python/packages/core/tests/core/test_mcp.py
+++ b/python/packages/core/tests/core/test_mcp.py
@@ -9,7 +9,7 @@
from mcp import types
from mcp.client.session import ClientSession
from mcp.shared.exceptions import McpError
-from pydantic import AnyUrl, ValidationError
+from pydantic import AnyUrl, BaseModel, ValidationError
from agent_framework import (
ChatMessage,
@@ -357,122 +357,360 @@ def test_chat_message_to_mcp_types():
assert isinstance(mcp_contents[1], types.ImageContent)
-def test_get_input_model_from_mcp_tool():
- """Test creation of input model from MCP tool."""
- tool = types.Tool(
- name="test_tool",
- description="A test tool",
- inputSchema={
- "type": "object",
- "properties": {"param1": {"type": "string"}, "param2": {"type": "number"}},
- "required": ["param1"],
- },
- )
- model = _get_input_model_from_mcp_tool(tool)
-
- # Create an instance to verify the model works
- instance = model(param1="test", param2=42)
- assert instance.param1 == "test"
- assert instance.param2 == 42
-
- # Test validation
- with pytest.raises(ValidationError): # Missing required param1
- model(param2=42)
-
-
-def test_get_input_model_from_mcp_tool_with_nested_object():
- """Test creation of input model from MCP tool with nested object property."""
- tool = types.Tool(
- name="get_customer_detail",
- description="Get customer details",
- inputSchema={
- "type": "object",
- "properties": {
- "params": {
- "type": "object",
- "properties": {"customer_id": {"type": "integer"}},
- "required": ["customer_id"],
- }
+@pytest.mark.parametrize(
+ "test_id,input_schema,valid_data,expected_values,invalid_data,validation_check",
+ [
+ # Basic types with required/optional fields
+ (
+ "basic_types",
+ {
+ "type": "object",
+ "properties": {"param1": {"type": "string"}, "param2": {"type": "number"}},
+ "required": ["param1"],
},
- "required": ["params"],
- },
- )
- model = _get_input_model_from_mcp_tool(tool)
-
- # Create an instance to verify the model works with nested objects
- instance = model(params={"customer_id": 251})
- assert instance.params == {"customer_id": 251}
- assert isinstance(instance.params, dict)
-
- # Verify model_dump produces the correct nested structure
- dumped = instance.model_dump()
- assert dumped == {"params": {"customer_id": 251}}
-
-
-def test_get_input_model_from_mcp_tool_with_ref_schema():
- """Test creation of input model from MCP tool with $ref schema.
-
- This simulates a FastMCP tool that uses Pydantic models with $ref in the schema.
- The schema should be resolved and nested objects should be preserved.
- """
- # This is similar to what FastMCP generates when you have:
- # async def get_customer_detail(params: CustomerIdParam) -> CustomerDetail
- tool = types.Tool(
- name="get_customer_detail",
- description="Get customer details",
- inputSchema={
- "type": "object",
- "properties": {"params": {"$ref": "#/$defs/CustomerIdParam"}},
- "required": ["params"],
- "$defs": {
- "CustomerIdParam": {
- "type": "object",
- "properties": {"customer_id": {"type": "integer"}},
- "required": ["customer_id"],
+ {"param1": "test", "param2": 42},
+ {"param1": "test", "param2": 42},
+ {"param2": 42}, # Missing required param1
+ None,
+ ),
+ # Nested object
+ (
+ "nested_object",
+ {
+ "type": "object",
+ "properties": {
+ "params": {
+ "type": "object",
+ "properties": {"customer_id": {"type": "integer"}},
+ "required": ["customer_id"],
+ }
+ },
+ "required": ["params"],
+ },
+ {"params": {"customer_id": 251}},
+ {"params.customer_id": 251},
+ {"params": {}}, # Missing required customer_id
+ lambda instance: isinstance(instance.params, BaseModel),
+ ),
+ # $ref resolution
+ (
+ "ref_schema",
+ {
+ "type": "object",
+ "properties": {"params": {"$ref": "#/$defs/CustomerIdParam"}},
+ "required": ["params"],
+ "$defs": {
+ "CustomerIdParam": {
+ "type": "object",
+ "properties": {"customer_id": {"type": "integer"}},
+ "required": ["customer_id"],
+ }
+ },
+ },
+ {"params": {"customer_id": 251}},
+ {"params.customer_id": 251},
+ {"params": {}}, # Missing required customer_id
+ lambda instance: isinstance(instance.params, BaseModel),
+ ),
+ # Array of strings (typed)
+ (
+ "array_of_strings",
+ {
+ "type": "object",
+ "properties": {
+ "tags": {
+ "type": "array",
+ "description": "List of tags",
+ "items": {"type": "string"},
+ }
+ },
+ "required": ["tags"],
+ },
+ {"tags": ["tag1", "tag2", "tag3"]},
+ {"tags": ["tag1", "tag2", "tag3"]},
+ None, # No validation error test for this case
+ None,
+ ),
+ # Array of integers (typed)
+ (
+ "array_of_integers",
+ {
+ "type": "object",
+ "properties": {
+ "numbers": {
+ "type": "array",
+ "description": "List of integers",
+ "items": {"type": "integer"},
+ }
+ },
+ "required": ["numbers"],
+ },
+ {"numbers": [1, 2, 3]},
+ {"numbers": [1, 2, 3]},
+ None,
+ None,
+ ),
+ # Array of objects (complex nested)
+ (
+ "array_of_objects",
+ {
+ "type": "object",
+ "properties": {
+ "users": {
+ "type": "array",
+ "description": "List of users",
+ "items": {
+ "type": "object",
+ "properties": {
+ "id": {"type": "integer", "description": "User ID"},
+ "name": {"type": "string", "description": "User name"},
+ },
+ "required": ["id", "name"],
+ },
+ }
+ },
+ "required": ["users"],
+ },
+ {"users": [{"id": 1, "name": "Alice"}, {"id": 2, "name": "Bob"}]},
+ {"users[0].id": 1, "users[0].name": "Alice", "users[1].id": 2, "users[1].name": "Bob"},
+ {"users": [{"id": 1}]}, # Missing required 'name'
+ lambda instance: all(isinstance(user, BaseModel) for user in instance.users),
+ ),
+ # Deeply nested objects (3+ levels)
+ (
+ "deeply_nested",
+ {
+ "type": "object",
+ "properties": {
+ "query": {
+ "type": "object",
+ "properties": {
+ "filters": {
+ "type": "object",
+ "properties": {
+ "date_range": {
+ "type": "object",
+ "properties": {
+ "start": {"type": "string"},
+ "end": {"type": "string"},
+ },
+ "required": ["start", "end"],
+ },
+ "categories": {"type": "array", "items": {"type": "string"}},
+ },
+ "required": ["date_range"],
+ }
+ },
+ "required": ["filters"],
+ }
+ },
+ "required": ["query"],
+ },
+ {
+ "query": {
+ "filters": {
+ "date_range": {"start": "2024-01-01", "end": "2024-12-31"},
+ "categories": ["tech", "science"],
+ }
}
},
- },
- )
- model = _get_input_model_from_mcp_tool(tool)
-
- # Create an instance to verify the model works with $ref schemas
- instance = model(params={"customer_id": 251})
- assert instance.params == {"customer_id": 251}
- assert isinstance(instance.params, dict)
-
- # Verify model_dump produces the correct nested structure
- dumped = instance.model_dump()
- assert dumped == {"params": {"customer_id": 251}}
-
-
-def test_get_input_model_from_mcp_tool_with_simple_array():
- """Test array with simple items schema (items schema should be preserved in json_schema_extra)."""
- tool = types.Tool(
- name="simple_array_tool",
- description="Tool with simple array",
- inputSchema={
- "type": "object",
- "properties": {
- "tags": {
- "type": "array",
- "description": "List of tags",
- "items": {"type": "string"}, # Simple string array
+ {
+ "query.filters.date_range.start": "2024-01-01",
+ "query.filters.date_range.end": "2024-12-31",
+ "query.filters.categories": ["tech", "science"],
+ },
+ {"query": {"filters": {"date_range": {}}}}, # Missing required start and end
+ None,
+ ),
+ # Complex $ref with nested structure
+ (
+ "ref_nested_structure",
+ {
+ "type": "object",
+ "properties": {"order": {"$ref": "#/$defs/OrderParams"}},
+ "required": ["order"],
+ "$defs": {
+ "OrderParams": {
+ "type": "object",
+ "properties": {
+ "customer": {"$ref": "#/$defs/Customer"},
+ "items": {"type": "array", "items": {"$ref": "#/$defs/OrderItem"}},
+ },
+ "required": ["customer", "items"],
+ },
+ "Customer": {
+ "type": "object",
+ "properties": {"id": {"type": "integer"}, "email": {"type": "string"}},
+ "required": ["id", "email"],
+ },
+ "OrderItem": {
+ "type": "object",
+ "properties": {"product_id": {"type": "string"}, "quantity": {"type": "integer"}},
+ "required": ["product_id", "quantity"],
+ },
+ },
+ },
+ {
+ "order": {
+ "customer": {"id": 123, "email": "test@example.com"},
+ "items": [{"product_id": "prod1", "quantity": 2}],
}
},
- "required": ["tags"],
- },
- )
+ {
+ "order.customer.id": 123,
+ "order.customer.email": "test@example.com",
+ "order.items[0].product_id": "prod1",
+ "order.items[0].quantity": 2,
+ },
+ {"order": {"customer": {"id": 123}, "items": []}}, # Missing email
+ lambda instance: isinstance(instance.order.customer, BaseModel),
+ ),
+ # Mixed types (primitives, arrays, nested objects)
+ (
+ "mixed_types",
+ {
+ "type": "object",
+ "properties": {
+ "simple_string": {"type": "string"},
+ "simple_number": {"type": "integer"},
+ "string_array": {"type": "array", "items": {"type": "string"}},
+ "nested_config": {
+ "type": "object",
+ "properties": {
+ "enabled": {"type": "boolean"},
+ "options": {"type": "array", "items": {"type": "string"}},
+ },
+ "required": ["enabled"],
+ },
+ },
+ "required": ["simple_string", "nested_config"],
+ },
+ {
+ "simple_string": "test",
+ "simple_number": 42,
+ "string_array": ["a", "b"],
+ "nested_config": {"enabled": True, "options": ["opt1", "opt2"]},
+ },
+ {
+ "simple_string": "test",
+ "simple_number": 42,
+ "string_array": ["a", "b"],
+ "nested_config.enabled": True,
+ "nested_config.options": ["opt1", "opt2"],
+ },
+ None,
+ None,
+ ),
+ # Empty schema (no properties)
+ (
+ "empty_schema",
+ {"type": "object", "properties": {}},
+ {},
+ {},
+ None,
+ None,
+ ),
+ # All primitive types
+ (
+ "all_primitives",
+ {
+ "type": "object",
+ "properties": {
+ "string_field": {"type": "string"},
+ "integer_field": {"type": "integer"},
+ "number_field": {"type": "number"},
+ "boolean_field": {"type": "boolean"},
+ },
+ },
+ {"string_field": "test", "integer_field": 42, "number_field": 3.14, "boolean_field": True},
+ {"string_field": "test", "integer_field": 42, "number_field": 3.14, "boolean_field": True},
+ None,
+ None,
+ ),
+ # Edge case: unresolvable $ref (fallback to dict)
+ (
+ "unresolvable_ref",
+ {
+ "type": "object",
+ "properties": {"data": {"$ref": "#/$defs/NonExistent"}},
+ "$defs": {},
+ },
+ {"data": {"key": "value"}},
+ {"data": {"key": "value"}},
+ None,
+ None,
+ ),
+ # Edge case: array without items schema (fallback to bare list)
+ (
+ "array_no_items",
+ {
+ "type": "object",
+ "properties": {"items": {"type": "array"}},
+ },
+ {"items": [1, "two", 3.0]},
+ {"items": [1, "two", 3.0]},
+ None,
+ None,
+ ),
+ # Edge case: object without properties (fallback to dict)
+ (
+ "object_no_properties",
+ {
+ "type": "object",
+ "properties": {"config": {"type": "object"}},
+ },
+ {"config": {"arbitrary": "data", "nested": {"key": "value"}}},
+ {"config": {"arbitrary": "data", "nested": {"key": "value"}}},
+ None,
+ None,
+ ),
+ ],
+)
+def test_get_input_model_from_mcp_tool_parametrized(
+ test_id, input_schema, valid_data, expected_values, invalid_data, validation_check
+):
+ """Parametrized test for JSON schema to Pydantic model conversion.
+
+ This test covers various edge cases including:
+ - Basic types with required/optional fields
+ - Nested objects
+ - $ref resolution
+ - Typed arrays (strings, integers, objects)
+ - Deeply nested structures
+ - Complex $ref with nested structures
+ - Mixed types
+
+ To add a new test case, add a tuple to the parametrize decorator with:
+ - test_id: A descriptive name for the test case
+ - input_schema: The JSON schema (inputSchema dict)
+ - valid_data: Valid data to instantiate the model
+ - expected_values: Dict of expected values (supports dot notation for nested access)
+ - invalid_data: Invalid data to test validation errors (None to skip)
+ - validation_check: Optional callable to perform additional validation checks
+ """
+ tool = types.Tool(name="test_tool", description="A test tool", inputSchema=input_schema)
model = _get_input_model_from_mcp_tool(tool)
- # Create an instance
- instance = model(tags=["tag1", "tag2", "tag3"])
- assert instance.tags == ["tag1", "tag2", "tag3"]
-
- # Verify JSON schema still preserves items for simple types
- json_schema = model.model_json_schema()
- tags_property = json_schema["properties"]["tags"]
- assert "items" in tags_property
- assert tags_property["items"]["type"] == "string"
+ # Test valid data
+ instance = model(**valid_data)
+
+ # Check expected values
+ for field_path, expected_value in expected_values.items():
+ # Support dot notation and array indexing for nested access
+ current = instance
+ parts = field_path.replace("]", "").replace("[", ".").split(".")
+ for part in parts:
+ current = current[int(part)] if part.isdigit() else getattr(current, part)
+ assert current == expected_value, f"Field {field_path} = {current}, expected {expected_value}"
+
+ # Run additional validation checks if provided
+ if validation_check:
+ assert validation_check(instance), f"Validation check failed for {test_id}"
+
+ # Test invalid data if provided
+ if invalid_data is not None:
+ with pytest.raises(ValidationError):
+ model(**invalid_data)
def test_get_input_model_from_mcp_prompt():
diff --git a/python/packages/core/tests/openai/test_openai_responses_client.py b/python/packages/core/tests/openai/test_openai_responses_client.py
index f2f9004d55..cc187e01f2 100644
--- a/python/packages/core/tests/openai/test_openai_responses_client.py
+++ b/python/packages/core/tests/openai/test_openai_responses_client.py
@@ -993,6 +993,110 @@ def test_streaming_response_basic_structure() -> None:
assert response.raw_representation is mock_event
+def test_streaming_annotation_added_with_file_path() -> None:
+ """Test streaming annotation added event with file_path type extracts HostedFileContent."""
+ client = OpenAIResponsesClient(model_id="test-model", api_key="test-key")
+ chat_options = ChatOptions()
+ function_call_ids: dict[int, tuple[str, str]] = {}
+
+ mock_event = MagicMock()
+ mock_event.type = "response.output_text.annotation.added"
+ mock_event.annotation_index = 0
+ mock_event.annotation = {
+ "type": "file_path",
+ "file_id": "file-abc123",
+ "index": 42,
+ }
+
+ response = client._create_streaming_response_content(mock_event, chat_options, function_call_ids)
+
+ assert len(response.contents) == 1
+ content = response.contents[0]
+ assert isinstance(content, HostedFileContent)
+ assert content.file_id == "file-abc123"
+ assert content.additional_properties is not None
+ assert content.additional_properties.get("annotation_index") == 0
+ assert content.additional_properties.get("index") == 42
+
+
+def test_streaming_annotation_added_with_file_citation() -> None:
+ """Test streaming annotation added event with file_citation type extracts HostedFileContent."""
+ client = OpenAIResponsesClient(model_id="test-model", api_key="test-key")
+ chat_options = ChatOptions()
+ function_call_ids: dict[int, tuple[str, str]] = {}
+
+ mock_event = MagicMock()
+ mock_event.type = "response.output_text.annotation.added"
+ mock_event.annotation_index = 1
+ mock_event.annotation = {
+ "type": "file_citation",
+ "file_id": "file-xyz789",
+ "filename": "sample.txt",
+ "index": 15,
+ }
+
+ response = client._create_streaming_response_content(mock_event, chat_options, function_call_ids)
+
+ assert len(response.contents) == 1
+ content = response.contents[0]
+ assert isinstance(content, HostedFileContent)
+ assert content.file_id == "file-xyz789"
+ assert content.additional_properties is not None
+ assert content.additional_properties.get("filename") == "sample.txt"
+ assert content.additional_properties.get("index") == 15
+
+
+def test_streaming_annotation_added_with_container_file_citation() -> None:
+ """Test streaming annotation added event with container_file_citation type."""
+ client = OpenAIResponsesClient(model_id="test-model", api_key="test-key")
+ chat_options = ChatOptions()
+ function_call_ids: dict[int, tuple[str, str]] = {}
+
+ mock_event = MagicMock()
+ mock_event.type = "response.output_text.annotation.added"
+ mock_event.annotation_index = 2
+ mock_event.annotation = {
+ "type": "container_file_citation",
+ "file_id": "file-container123",
+ "container_id": "container-456",
+ "filename": "data.csv",
+ "start_index": 10,
+ "end_index": 50,
+ }
+
+ response = client._create_streaming_response_content(mock_event, chat_options, function_call_ids)
+
+ assert len(response.contents) == 1
+ content = response.contents[0]
+ assert isinstance(content, HostedFileContent)
+ assert content.file_id == "file-container123"
+ assert content.additional_properties is not None
+ assert content.additional_properties.get("container_id") == "container-456"
+ assert content.additional_properties.get("filename") == "data.csv"
+ assert content.additional_properties.get("start_index") == 10
+ assert content.additional_properties.get("end_index") == 50
+
+
+def test_streaming_annotation_added_with_unknown_type() -> None:
+ """Test streaming annotation added event with unknown type is ignored."""
+ client = OpenAIResponsesClient(model_id="test-model", api_key="test-key")
+ chat_options = ChatOptions()
+ function_call_ids: dict[int, tuple[str, str]] = {}
+
+ mock_event = MagicMock()
+ mock_event.type = "response.output_text.annotation.added"
+ mock_event.annotation_index = 0
+ mock_event.annotation = {
+ "type": "url_citation",
+ "url": "https://example.com",
+ }
+
+ response = client._create_streaming_response_content(mock_event, chat_options, function_call_ids)
+
+ # url_citation should not produce HostedFileContent
+ assert len(response.contents) == 0
+
+
def test_service_response_exception_includes_original_error_details() -> None:
"""Test that ServiceResponseException messages include original error details in the new format."""
client = OpenAIResponsesClient(model_id="test-model", api_key="test-key")
diff --git a/python/packages/core/tests/workflow/test_concurrent.py b/python/packages/core/tests/workflow/test_concurrent.py
index db70be3f38..66cc8cfc68 100644
--- a/python/packages/core/tests/workflow/test_concurrent.py
+++ b/python/packages/core/tests/workflow/test_concurrent.py
@@ -3,6 +3,7 @@
from typing import Any, cast
import pytest
+from typing_extensions import Never
from agent_framework import (
AgentExecutorRequest,
@@ -52,6 +53,55 @@ def test_concurrent_builder_rejects_duplicate_executors() -> None:
ConcurrentBuilder().participants([a, b])
+def test_concurrent_builder_rejects_duplicate_executors_from_factories() -> None:
+ """Test that duplicate executor IDs from factories are detected at build time."""
+
+ def create_dup1() -> Executor:
+ return _FakeAgentExec("dup", "A")
+
+ def create_dup2() -> Executor:
+ return _FakeAgentExec("dup", "B") # same executor id
+
+ builder = ConcurrentBuilder().register_participants([create_dup1, create_dup2])
+ with pytest.raises(ValueError, match="Executor with ID 'dup' has already been created."):
+ builder.build()
+
+
+def test_concurrent_builder_rejects_mixed_participants_and_factories() -> None:
+ """Test that mixing .participants() and .register_participants() raises an error."""
+ # Case 1: participants first, then register_participants
+ with pytest.raises(ValueError, match="Cannot mix .participants"):
+ (
+ ConcurrentBuilder()
+ .participants([_FakeAgentExec("a", "A")])
+ .register_participants([lambda: _FakeAgentExec("b", "B")])
+ )
+
+ # Case 2: register_participants first, then participants
+ with pytest.raises(ValueError, match="Cannot mix .participants"):
+ (
+ ConcurrentBuilder()
+ .register_participants([lambda: _FakeAgentExec("a", "A")])
+ .participants([_FakeAgentExec("b", "B")])
+ )
+
+
+def test_concurrent_builder_rejects_multiple_calls_to_participants() -> None:
+ """Test that multiple calls to .participants() raises an error."""
+ with pytest.raises(ValueError, match=r"participants\(\) has already been called"):
+ (ConcurrentBuilder().participants([_FakeAgentExec("a", "A")]).participants([_FakeAgentExec("b", "B")]))
+
+
+def test_concurrent_builder_rejects_multiple_calls_to_register_participants() -> None:
+ """Test that multiple calls to .register_participants() raises an error."""
+ with pytest.raises(ValueError, match=r"register_participants\(\) has already been called"):
+ (
+ ConcurrentBuilder()
+ .register_participants([lambda: _FakeAgentExec("a", "A")])
+ .register_participants([lambda: _FakeAgentExec("b", "B")])
+ )
+
+
async def test_concurrent_default_aggregator_emits_single_user_and_assistants() -> None:
# Three synthetic agent executors
e1 = _FakeAgentExec("agentA", "Alpha")
@@ -159,6 +209,138 @@ def summarize(results: list[AgentExecutorResponse]) -> str: # type: ignore[over
assert aggregator.id == "summarize"
+async def test_concurrent_with_aggregator_executor_instance() -> None:
+ """Test with_aggregator using an Executor instance (not factory)."""
+
+ class CustomAggregator(Executor):
+ @handler
+ async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowContext[Never, str]) -> None:
+ texts: list[str] = []
+ for r in results:
+ msgs: list[ChatMessage] = r.agent_run_response.messages
+ texts.append(msgs[-1].text if msgs else "")
+ await ctx.yield_output(" & ".join(sorted(texts)))
+
+ e1 = _FakeAgentExec("agentA", "One")
+ e2 = _FakeAgentExec("agentB", "Two")
+
+ aggregator_instance = CustomAggregator(id="instance_aggregator")
+ wf = ConcurrentBuilder().participants([e1, e2]).with_aggregator(aggregator_instance).build()
+
+ completed = False
+ output: str | None = None
+ async for ev in wf.run_stream("prompt: instance test"):
+ if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE:
+ completed = True
+ elif isinstance(ev, WorkflowOutputEvent):
+ output = cast(str, ev.data)
+ if completed and output is not None:
+ break
+
+ assert completed
+ assert output is not None
+ assert isinstance(output, str)
+ assert output == "One & Two"
+
+
+async def test_concurrent_with_aggregator_executor_factory() -> None:
+ """Test with_aggregator using an Executor factory."""
+
+ class CustomAggregator(Executor):
+ @handler
+ async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowContext[Never, str]) -> None:
+ texts: list[str] = []
+ for r in results:
+ msgs: list[ChatMessage] = r.agent_run_response.messages
+ texts.append(msgs[-1].text if msgs else "")
+ await ctx.yield_output(" | ".join(sorted(texts)))
+
+ e1 = _FakeAgentExec("agentA", "One")
+ e2 = _FakeAgentExec("agentB", "Two")
+
+ wf = (
+ ConcurrentBuilder()
+ .participants([e1, e2])
+ .register_aggregator(lambda: CustomAggregator(id="custom_aggregator"))
+ .build()
+ )
+
+ completed = False
+ output: str | None = None
+ async for ev in wf.run_stream("prompt: factory test"):
+ if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE:
+ completed = True
+ elif isinstance(ev, WorkflowOutputEvent):
+ output = cast(str, ev.data)
+ if completed and output is not None:
+ break
+
+ assert completed
+ assert output is not None
+ assert isinstance(output, str)
+ assert output == "One | Two"
+
+
+async def test_concurrent_with_aggregator_executor_factory_with_default_id() -> None:
+ """Test with_aggregator using an Executor class directly as factory (with default __init__ parameters)."""
+
+ class CustomAggregator(Executor):
+ def __init__(self, id: str = "default_aggregator") -> None:
+ super().__init__(id)
+
+ @handler
+ async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowContext[Never, str]) -> None:
+ texts: list[str] = []
+ for r in results:
+ msgs: list[ChatMessage] = r.agent_run_response.messages
+ texts.append(msgs[-1].text if msgs else "")
+ await ctx.yield_output(" | ".join(sorted(texts)))
+
+ e1 = _FakeAgentExec("agentA", "One")
+ e2 = _FakeAgentExec("agentB", "Two")
+
+ wf = ConcurrentBuilder().participants([e1, e2]).register_aggregator(CustomAggregator).build()
+
+ completed = False
+ output: str | None = None
+ async for ev in wf.run_stream("prompt: factory test"):
+ if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE:
+ completed = True
+ elif isinstance(ev, WorkflowOutputEvent):
+ output = cast(str, ev.data)
+ if completed and output is not None:
+ break
+
+ assert completed
+ assert output is not None
+ assert isinstance(output, str)
+ assert output == "One | Two"
+
+
+def test_concurrent_builder_rejects_multiple_calls_to_with_aggregator() -> None:
+ """Test that multiple calls to .with_aggregator() raises an error."""
+
+ def summarize(results: list[AgentExecutorResponse]) -> str: # type: ignore[override]
+ return str(len(results))
+
+ with pytest.raises(ValueError, match=r"with_aggregator\(\) has already been called"):
+ (ConcurrentBuilder().with_aggregator(summarize).with_aggregator(summarize))
+
+
+def test_concurrent_builder_rejects_multiple_calls_to_register_aggregator() -> None:
+ """Test that multiple calls to .register_aggregator() raises an error."""
+
+ class CustomAggregator(Executor):
+ pass
+
+ with pytest.raises(ValueError, match=r"register_aggregator\(\) has already been called"):
+ (
+ ConcurrentBuilder()
+ .register_aggregator(lambda: CustomAggregator(id="agg1"))
+ .register_aggregator(lambda: CustomAggregator(id="agg2"))
+ )
+
+
async def test_concurrent_checkpoint_resume_round_trip() -> None:
storage = InMemoryCheckpointStorage()
@@ -278,3 +460,92 @@ async def test_concurrent_checkpoint_runtime_overrides_buildtime() -> None:
assert len(runtime_checkpoints) > 0, "Runtime storage should have checkpoints"
assert len(buildtime_checkpoints) == 0, "Build-time storage should have no checkpoints when overridden"
+
+
+def test_concurrent_builder_rejects_empty_participant_factories() -> None:
+ with pytest.raises(ValueError):
+ ConcurrentBuilder().register_participants([])
+
+
+async def test_concurrent_builder_reusable_after_build_with_participants() -> None:
+ """Test that the builder can be reused to build multiple identical workflows with participants()."""
+ e1 = _FakeAgentExec("agentA", "One")
+ e2 = _FakeAgentExec("agentB", "Two")
+
+ builder = ConcurrentBuilder().participants([e1, e2])
+
+ builder.build()
+
+ assert builder._participants[0] is e1 # type: ignore
+ assert builder._participants[1] is e2 # type: ignore
+ assert builder._participant_factories == [] # type: ignore
+
+
+async def test_concurrent_builder_reusable_after_build_with_factories() -> None:
+ """Test that the builder can be reused to build multiple workflows with register_participants()."""
+ call_count = 0
+
+ def create_agent_executor_a() -> Executor:
+ nonlocal call_count
+ call_count += 1
+ return _FakeAgentExec("agentA", "One")
+
+ def create_agent_executor_b() -> Executor:
+ nonlocal call_count
+ call_count += 1
+ return _FakeAgentExec("agentB", "Two")
+
+ builder = ConcurrentBuilder().register_participants([create_agent_executor_a, create_agent_executor_b])
+
+ # Build the first workflow
+ wf1 = builder.build()
+
+ assert builder._participants == [] # type: ignore
+ assert len(builder._participant_factories) == 2 # type: ignore
+ assert call_count == 2
+
+ # Build the second workflow
+ wf2 = builder.build()
+ assert call_count == 4
+
+ # Verify that the two workflows have different executor instances
+ assert wf1.executors["agentA"] is not wf2.executors["agentA"]
+ assert wf1.executors["agentB"] is not wf2.executors["agentB"]
+
+
+async def test_concurrent_with_register_participants() -> None:
+ """Test workflow creation using register_participants with factories."""
+
+ def create_agent1() -> Executor:
+ return _FakeAgentExec("agentA", "Alpha")
+
+ def create_agent2() -> Executor:
+ return _FakeAgentExec("agentB", "Beta")
+
+ def create_agent3() -> Executor:
+ return _FakeAgentExec("agentC", "Gamma")
+
+ wf = ConcurrentBuilder().register_participants([create_agent1, create_agent2, create_agent3]).build()
+
+ completed = False
+ output: list[ChatMessage] | None = None
+ async for ev in wf.run_stream("test prompt"):
+ if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE:
+ completed = True
+ elif isinstance(ev, WorkflowOutputEvent):
+ output = cast(list[ChatMessage], ev.data)
+ if completed and output is not None:
+ break
+
+ assert completed
+ assert output is not None
+ messages: list[ChatMessage] = output
+
+ # Expect one user message + one assistant message per participant
+ assert len(messages) == 1 + 3
+ assert messages[0].role == Role.USER
+ assert "test prompt" in messages[0].text
+
+ assistant_texts = {m.text for m in messages[1:]}
+ assert assistant_texts == {"Alpha", "Beta", "Gamma"}
+ assert all(m.role == Role.ASSISTANT for m in messages[1:])
diff --git a/python/packages/core/tests/workflow/test_workflow_builder.py b/python/packages/core/tests/workflow/test_workflow_builder.py
index a037bf51b6..b281edee34 100644
--- a/python/packages/core/tests/workflow/test_workflow_builder.py
+++ b/python/packages/core/tests/workflow/test_workflow_builder.py
@@ -293,6 +293,20 @@ def test_register_duplicate_name_raises_error():
builder.register_executor(lambda: MockExecutor(id="executor_2"), name="MyExecutor")
+def test_register_duplicate_id_raises_error():
+ """Test that registering duplicate id raises an error."""
+ builder = WorkflowBuilder()
+
+ # Register first executor
+ builder.register_executor(lambda: MockExecutor(id="executor"), name="MyExecutor1")
+ builder.register_executor(lambda: MockExecutor(id="executor"), name="MyExecutor2")
+ builder.set_start_executor("MyExecutor1")
+
+ # Registering second executor with same ID should raise ValueError
+ with pytest.raises(ValueError, match="Executor with ID 'executor' has already been created."):
+ builder.build()
+
+
def test_register_agent_basic():
"""Test basic agent registration with lazy initialization."""
builder = WorkflowBuilder()
diff --git a/python/packages/declarative/pyproject.toml b/python/packages/declarative/pyproject.toml
index f1032eb064..d36c2b76b5 100644
--- a/python/packages/declarative/pyproject.toml
+++ b/python/packages/declarative/pyproject.toml
@@ -4,7 +4,7 @@ description = "Declarative specification support for Microsoft Agent Framework."
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}]
readme = "README.md"
requires-python = ">=3.10"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
license-files = ["LICENSE"]
urls.homepage = "https://aka.ms/agent-framework"
urls.source = "https://github.com/microsoft/agent-framework/tree/main/python"
diff --git a/python/packages/devui/pyproject.toml b/python/packages/devui/pyproject.toml
index 4501d6e439..b4b43903bb 100644
--- a/python/packages/devui/pyproject.toml
+++ b/python/packages/devui/pyproject.toml
@@ -4,7 +4,7 @@ description = "Debug UI for Microsoft Agent Framework with OpenAI-compatible API
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}]
readme = "README.md"
requires-python = ">=3.10"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
license-files = ["LICENSE"]
urls.homepage = "https://github.com/microsoft/agent-framework"
urls.source = "https://github.com/microsoft/agent-framework/tree/main/python"
diff --git a/python/packages/lab/pyproject.toml b/python/packages/lab/pyproject.toml
index ead8033974..6b95b5a8e6 100644
--- a/python/packages/lab/pyproject.toml
+++ b/python/packages/lab/pyproject.toml
@@ -4,7 +4,7 @@ description = "Experimental modules for Microsoft Agent Framework"
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}]
readme = "README.md"
requires-python = ">=3.10"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
license-files = ["LICENSE"]
urls.homepage = "https://aka.ms/agent-framework"
urls.source = "https://github.com/microsoft/agent-framework/tree/main/python"
diff --git a/python/packages/mem0/pyproject.toml b/python/packages/mem0/pyproject.toml
index 3317cc4841..140abca32b 100644
--- a/python/packages/mem0/pyproject.toml
+++ b/python/packages/mem0/pyproject.toml
@@ -4,7 +4,7 @@ description = "Mem0 integration for Microsoft Agent Framework."
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}]
readme = "README.md"
requires-python = ">=3.10"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
license-files = ["LICENSE"]
urls.homepage = "https://aka.ms/agent-framework"
urls.source = "https://github.com/microsoft/agent-framework/tree/main/python"
diff --git a/python/packages/purview/pyproject.toml b/python/packages/purview/pyproject.toml
index 8eeebd9525..5715639d19 100644
--- a/python/packages/purview/pyproject.toml
+++ b/python/packages/purview/pyproject.toml
@@ -4,7 +4,7 @@ description = "Microsoft Purview (Graph dataSecurityAndGovernance) integration f
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}]
readme = "README.md"
requires-python = ">=3.10"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
license-files = ["LICENSE"]
urls.homepage = "https://github.com/microsoft/agent-framework"
urls.source = "https://github.com/microsoft/agent-framework/tree/main/python"
diff --git a/python/packages/redis/pyproject.toml b/python/packages/redis/pyproject.toml
index d58c4f72ce..3cfa854300 100644
--- a/python/packages/redis/pyproject.toml
+++ b/python/packages/redis/pyproject.toml
@@ -4,7 +4,7 @@ description = "Redis integration for Microsoft Agent Framework."
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}]
readme = "README.md"
requires-python = ">=3.10"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
license-files = ["LICENSE"]
urls.homepage = "https://aka.ms/agent-framework"
urls.source = "https://github.com/microsoft/agent-framework/tree/main/python"
diff --git a/python/pyproject.toml b/python/pyproject.toml
index bc43fb1df4..464f7c3f61 100644
--- a/python/pyproject.toml
+++ b/python/pyproject.toml
@@ -4,7 +4,7 @@ description = "Microsoft Agent Framework for building AI Agents with Python. Thi
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}]
readme = "README.md"
requires-python = ">=3.10"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
license-files = ["LICENSE"]
urls.homepage = "https://aka.ms/agent-framework"
urls.source = "https://github.com/microsoft/agent-framework/tree/main/python"
@@ -23,7 +23,7 @@ classifiers = [
"Typing :: Typed",
]
dependencies = [
- "agent-framework-core[all]==1.0.0b251209",
+ "agent-framework-core[all]==1.0.0b251211",
]
[dependency-groups]
diff --git a/python/samples/README.md b/python/samples/README.md
index 3434fa639d..af51b28379 100644
--- a/python/samples/README.md
+++ b/python/samples/README.md
@@ -25,6 +25,7 @@ This directory contains samples demonstrating the capabilities of Microsoft Agen
| [`getting_started/agents/azure_ai_agent/azure_ai_with_azure_ai_search.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_azure_ai_search.py) | Azure AI Agent with Azure AI Search Example |
| [`getting_started/agents/azure_ai_agent/azure_ai_with_bing_grounding.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_bing_grounding.py) | Azure AI agent with Bing Grounding search for real-time web information |
| [`getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter.py) | Azure AI Agent with Code Interpreter Example |
+| [`getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter_file_generation.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter_file_generation.py) | Azure AI Agent with Code Interpreter File Generation Example |
| [`getting_started/agents/azure_ai_agent/azure_ai_with_existing_agent.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_existing_agent.py) | Azure AI Agent with Existing Agent Example |
| [`getting_started/agents/azure_ai_agent/azure_ai_with_existing_thread.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_existing_thread.py) | Azure AI Agent with Existing Thread Example |
| [`getting_started/agents/azure_ai_agent/azure_ai_with_explicit_settings.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_explicit_settings.py) | Azure AI Agent with Explicit Settings Example |
@@ -47,6 +48,7 @@ This directory contains samples demonstrating the capabilities of Microsoft Agen
| [`getting_started/agents/azure_ai/azure_ai_with_bing_custom_search.py`](./getting_started/agents/azure_ai/azure_ai_with_bing_custom_search.py) | Azure AI Agent with Bing Custom Search Example |
| [`getting_started/agents/azure_ai/azure_ai_with_browser_automation.py`](./getting_started/agents/azure_ai/azure_ai_with_browser_automation.py) | Azure AI Agent with Browser Automation Example |
| [`getting_started/agents/azure_ai/azure_ai_with_code_interpreter.py`](./getting_started/agents/azure_ai/azure_ai_with_code_interpreter.py) | Azure AI Agent with Code Interpreter Example |
+| [`getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_generation.py`](./getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_generation.py) | Azure AI Agent with Code Interpreter File Generation Example |
| [`getting_started/agents/azure_ai/azure_ai_with_existing_agent.py`](./getting_started/agents/azure_ai/azure_ai_with_existing_agent.py) | Azure AI Agent with Existing Agent Example |
| [`getting_started/agents/azure_ai/azure_ai_with_existing_conversation.py`](./getting_started/agents/azure_ai/azure_ai_with_existing_conversation.py) | Azure AI Agent with Existing Conversation Example |
| [`getting_started/agents/azure_ai/azure_ai_with_explicit_settings.py`](./getting_started/agents/azure_ai/azure_ai_with_explicit_settings.py) | Azure AI Agent with Explicit Settings Example |
diff --git a/python/samples/getting_started/agents/azure_ai/README.md b/python/samples/getting_started/agents/azure_ai/README.md
index 437094795b..8ed95ad091 100644
--- a/python/samples/getting_started/agents/azure_ai/README.md
+++ b/python/samples/getting_started/agents/azure_ai/README.md
@@ -14,6 +14,7 @@ This folder contains examples demonstrating different ways to create and use age
| [`azure_ai_with_bing_custom_search.py`](azure_ai_with_bing_custom_search.py) | Shows how to use Bing Custom Search with Azure AI agents to search custom search instances and provide responses with relevant results. Requires a Bing Custom Search connection and instance configured in your Azure AI project. |
| [`azure_ai_with_browser_automation.py`](azure_ai_with_browser_automation.py) | Shows how to use Browser Automation with Azure AI agents to perform automated web browsing tasks and provide responses based on web interactions. Requires a Browser Automation connection configured in your Azure AI project. |
| [`azure_ai_with_code_interpreter.py`](azure_ai_with_code_interpreter.py) | Shows how to use the `HostedCodeInterpreterTool` with Azure AI agents to write and execute Python code for mathematical problem solving and data analysis. |
+| [`azure_ai_with_code_interpreter_file_generation.py`](azure_ai_with_code_interpreter_file_generation.py) | Shows how to retrieve file IDs from code interpreter generated files using both streaming and non-streaming approaches. |
| [`azure_ai_with_existing_agent.py`](azure_ai_with_existing_agent.py) | Shows how to work with a pre-existing agent by providing the agent name and version to the Azure AI client. Demonstrates agent reuse patterns for production scenarios. |
| [`azure_ai_with_existing_conversation.py`](azure_ai_with_existing_conversation.py) | Demonstrates how to use an existing conversation created on the service side with Azure AI agents. Shows two approaches: specifying conversation ID at the client level and using AgentThread with an existing conversation ID. |
| [`azure_ai_with_application_endpoint.py`](azure_ai_with_application_endpoint.py) | Demonstrates calling the Azure AI application-scoped endpoint. |
diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_generation.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_generation.py
new file mode 100644
index 0000000000..76758d1b61
--- /dev/null
+++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_generation.py
@@ -0,0 +1,111 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+import asyncio
+
+from agent_framework import (
+ CitationAnnotation,
+ HostedCodeInterpreterTool,
+ HostedFileContent,
+ TextContent,
+)
+from agent_framework._agents import AgentRunResponseUpdate
+from agent_framework.azure import AzureAIClient
+from azure.identity.aio import AzureCliCredential
+
+"""
+Azure AI V2 Code Interpreter File Generation Sample
+
+This sample demonstrates how the V2 AzureAIClient handles file annotations
+when code interpreter generates text files. It shows both non-streaming
+and streaming approaches to verify file ID extraction.
+"""
+
+QUERY = (
+ "Write a simple Python script that creates a text file called 'sample.txt' containing "
+ "'Hello from the code interpreter!' and save it to disk."
+)
+
+
+async def test_non_streaming() -> None:
+ """Test non-streaming response - should have annotations on TextContent."""
+ print("=== Testing Non-Streaming Response ===")
+
+ async with (
+ AzureCliCredential() as credential,
+ AzureAIClient(credential=credential).create_agent(
+ name="V2CodeInterpreterFileAgent",
+ instructions="You are a helpful assistant that can write and execute Python code to create files.",
+ tools=HostedCodeInterpreterTool(),
+ ) as agent,
+ ):
+ print(f"User: {QUERY}\n")
+
+ result = await agent.run(QUERY)
+ print(f"Agent: {result.text}\n")
+
+ # Check for annotations in the response
+ annotations_found: list[str] = []
+ # AgentRunResponse has messages property, which contains ChatMessage objects
+ for message in result.messages:
+ for content in message.contents:
+ if isinstance(content, TextContent) and content.annotations:
+ for annotation in content.annotations:
+ if isinstance(annotation, CitationAnnotation) and annotation.file_id:
+ annotations_found.append(annotation.file_id)
+ print(f"Found file annotation: file_id={annotation.file_id}")
+
+ if annotations_found:
+ print(f"SUCCESS: Found {len(annotations_found)} file annotation(s)")
+ else:
+ print("WARNING: No file annotations found in non-streaming response")
+
+
+async def test_streaming() -> None:
+ """Test streaming response - check if file content is captured via HostedFileContent."""
+ print("\n=== Testing Streaming Response ===")
+
+ async with (
+ AzureCliCredential() as credential,
+ AzureAIClient(credential=credential).create_agent(
+ name="V2CodeInterpreterFileAgentStreaming",
+ instructions="You are a helpful assistant that can write and execute Python code to create files.",
+ tools=HostedCodeInterpreterTool(),
+ ) as agent,
+ ):
+ print(f"User: {QUERY}\n")
+ annotations_found: list[str] = []
+ text_chunks: list[str] = []
+ file_ids_found: list[str] = []
+
+ async for update in agent.run_stream(QUERY):
+ if isinstance(update, AgentRunResponseUpdate):
+ for content in update.contents:
+ if isinstance(content, TextContent):
+ if content.text:
+ text_chunks.append(content.text)
+ if content.annotations:
+ for annotation in content.annotations:
+ if isinstance(annotation, CitationAnnotation) and annotation.file_id:
+ annotations_found.append(annotation.file_id)
+ print(f"Found streaming annotation: file_id={annotation.file_id}")
+ elif isinstance(content, HostedFileContent):
+ file_ids_found.append(content.file_id)
+ print(f"Found streaming HostedFileContent: file_id={content.file_id}")
+
+ print(f"\nAgent response: {''.join(text_chunks)[:200]}...")
+
+ if annotations_found or file_ids_found:
+ total = len(annotations_found) + len(file_ids_found)
+ print(f"SUCCESS: Found {total} file reference(s) in streaming")
+ else:
+ print("WARNING: No file annotations found in streaming response")
+
+
+async def main() -> None:
+ print("AzureAIClient Code Interpreter File Generation Test\n")
+ await test_non_streaming()
+ await test_streaming()
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/python/samples/getting_started/agents/azure_ai_agent/README.md b/python/samples/getting_started/agents/azure_ai_agent/README.md
index c7337bbe82..c29c068f37 100644
--- a/python/samples/getting_started/agents/azure_ai_agent/README.md
+++ b/python/samples/getting_started/agents/azure_ai_agent/README.md
@@ -9,6 +9,7 @@ This folder contains examples demonstrating different ways to create and use age
| [`azure_ai_basic.py`](azure_ai_basic.py) | The simplest way to create an agent using `ChatAgent` with `AzureAIAgentClient`. It automatically handles all configuration using environment variables. |
| [`azure_ai_with_bing_custom_search.py`](azure_ai_with_bing_custom_search.py) | Shows how to use Bing Custom Search with Azure AI agents to find real-time information from the web using custom search configurations. Demonstrates how to set up and use HostedWebSearchTool with custom search instances. |
| [`azure_ai_with_bing_grounding.py`](azure_ai_with_bing_grounding.py) | Shows how to use Bing Grounding search with Azure AI agents to find real-time information from the web. Demonstrates web search capabilities with proper source citations and comprehensive error handling. |
+| [`azure_ai_with_code_interpreter_file_generation.py`](azure_ai_with_code_interpreter_file_generation.py) | Shows how to retrieve file IDs from code interpreter generated files using both streaming and non-streaming approaches. |
| [`azure_ai_with_code_interpreter.py`](azure_ai_with_code_interpreter.py) | Shows how to use the HostedCodeInterpreterTool with Azure AI agents to write and execute Python code. Includes helper methods for accessing code interpreter data from response chunks. |
| [`azure_ai_with_existing_agent.py`](azure_ai_with_existing_agent.py) | Shows how to work with a pre-existing agent by providing the agent ID to the Azure AI chat client. This example also demonstrates proper cleanup of manually created agents. |
| [`azure_ai_with_existing_thread.py`](azure_ai_with_existing_thread.py) | Shows how to work with a pre-existing thread by providing the thread ID to the Azure AI chat client. This example also demonstrates proper cleanup of manually created threads. |
diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter_file_generation.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter_file_generation.py
new file mode 100644
index 0000000000..cbd64bc5a7
--- /dev/null
+++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter_file_generation.py
@@ -0,0 +1,102 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+import asyncio
+
+from agent_framework import AgentRunResponseUpdate, ChatAgent, HostedCodeInterpreterTool, HostedFileContent
+from agent_framework.azure import AzureAIAgentClient
+from azure.identity.aio import AzureCliCredential
+
+"""
+Azure AI Agent Code Interpreter File Generation Example
+
+This sample demonstrates using HostedCodeInterpreterTool with AzureAIAgentClient
+to generate a text file and then retrieve it.
+
+The test flow:
+1. Create an agent with code interpreter tool
+2. Ask the agent to generate a txt file using Python code
+3. Capture the file_id from HostedFileContent in the response
+4. Retrieve the file using the agents_client.files API
+"""
+
+
+async def main() -> None:
+ """Test file generation and retrieval with code interpreter."""
+
+ async with AzureCliCredential() as credential:
+ client = AzureAIAgentClient(credential=credential)
+
+ try:
+ async with ChatAgent(
+ chat_client=client,
+ instructions=(
+ "You are a Python code execution assistant. "
+ "ALWAYS use the code interpreter tool to execute Python code when asked to create files. "
+ "Write actual Python code to create files, do not just describe what you would do."
+ ),
+ tools=[HostedCodeInterpreterTool()],
+ ) as agent:
+ # Be very explicit about wanting code execution and a download link
+ query = (
+ "Use the code interpreter to execute this Python code and then provide me "
+ "with a download link for the generated file:\n"
+ "```python\n"
+ "with open('/mnt/data/sample.txt', 'w') as f:\n"
+ " f.write('Hello, World! This is a test file.')\n"
+ "'/mnt/data/sample.txt'\n" # Return the path so it becomes downloadable
+ "```"
+ )
+ print(f"User: {query}\n")
+ print("=" * 60)
+
+ # Collect file_ids from the response
+ file_ids: list[str] = []
+
+ async for chunk in agent.run_stream(query):
+ if not isinstance(chunk, AgentRunResponseUpdate):
+ continue
+
+ for content in chunk.contents:
+ if content.type == "text":
+ print(content.text, end="", flush=True)
+ elif content.type == "hosted_file":
+ if isinstance(content, HostedFileContent):
+ file_ids.append(content.file_id)
+ print(f"\n[File generated: {content.file_id}]")
+
+ print("\n" + "=" * 60)
+
+ # Attempt to retrieve discovered files
+ if file_ids:
+ print(f"\nAttempting to retrieve {len(file_ids)} file(s):")
+ for file_id in file_ids:
+ try:
+ file_info = await client.agents_client.files.get(file_id)
+ print(f" File {file_id}: Retrieved successfully")
+ print(f" Filename: {file_info.filename}")
+ print(f" Purpose: {file_info.purpose}")
+ print(f" Bytes: {file_info.bytes}")
+ except Exception as e:
+ print(f" File {file_id}: FAILED to retrieve - {e}")
+ else:
+ print("No file IDs were captured from the response.")
+
+ # List all files to see if any exist
+ print("\nListing all files in the agent service:")
+ try:
+ files_list = await client.agents_client.files.list()
+ count = 0
+ for file_info in files_list.data:
+ count += 1
+ print(f" - {file_info.id}: {file_info.filename} ({file_info.purpose})")
+ if count == 0:
+ print(" No files found.")
+ except Exception as e:
+ print(f" Failed to list files: {e}")
+
+ finally:
+ await client.close()
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/python/samples/getting_started/workflows/README.md b/python/samples/getting_started/workflows/README.md
index 8f193eef72..7c5ad5687f 100644
--- a/python/samples/getting_started/workflows/README.md
+++ b/python/samples/getting_started/workflows/README.md
@@ -110,6 +110,7 @@ For additional observability samples in Agent Framework, see the [observability
| Concurrent Orchestration (Default Aggregator) | [orchestration/concurrent_agents.py](./orchestration/concurrent_agents.py) | Fan-out to multiple agents; fan-in with default aggregator returning combined ChatMessages |
| Concurrent Orchestration (Custom Aggregator) | [orchestration/concurrent_custom_aggregator.py](./orchestration/concurrent_custom_aggregator.py) | Override aggregator via callback; summarize results with an LLM |
| Concurrent Orchestration (Custom Agent Executors) | [orchestration/concurrent_custom_agent_executors.py](./orchestration/concurrent_custom_agent_executors.py) | Child executors own ChatAgents; concurrent fan-out/fan-in via ConcurrentBuilder |
+| Concurrent Orchestration (Participant Factory) | [orchestration/concurrent_participant_factory.py](./orchestration/concurrent_participant_factory.py) | Use participant factories for state isolation between workflow instances |
| Group Chat with Agent Manager | [orchestration/group_chat_agent_manager.py](./orchestration/group_chat_agent_manager.py) | Agent-based manager using `set_manager()` to select next speaker |
| Group Chat Philosophical Debate | [orchestration/group_chat_philosophical_debate.py](./orchestration/group_chat_philosophical_debate.py) | Agent manager moderates long-form, multi-round debate across diverse participants |
| Group Chat with Simple Function Selector | [orchestration/group_chat_simple_selector.py](./orchestration/group_chat_simple_selector.py) | Group chat with a simple function selector for next speaker |
diff --git a/python/samples/getting_started/workflows/orchestration/concurrent_custom_aggregator.py b/python/samples/getting_started/workflows/orchestration/concurrent_custom_aggregator.py
index 4ad8c9fcb3..44f71ba7bc 100644
--- a/python/samples/getting_started/workflows/orchestration/concurrent_custom_aggregator.py
+++ b/python/samples/getting_started/workflows/orchestration/concurrent_custom_aggregator.py
@@ -17,7 +17,7 @@
The workflow completes when all participants become idle.
Demonstrates:
-- ConcurrentBuilder().participants([...]).with_custom_aggregator(callback)
+- ConcurrentBuilder().participants([...]).with_aggregator(callback)
- Fan-out to agents and fan-in at an aggregator
- Aggregation implemented via an LLM call (chat_client.get_response)
- Workflow output yielded with the synthesized summary string
diff --git a/python/samples/getting_started/workflows/orchestration/concurrent_participant_factory.py b/python/samples/getting_started/workflows/orchestration/concurrent_participant_factory.py
new file mode 100644
index 0000000000..435e59b2ba
--- /dev/null
+++ b/python/samples/getting_started/workflows/orchestration/concurrent_participant_factory.py
@@ -0,0 +1,169 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+import asyncio
+from typing import Any, Never
+
+from agent_framework import (
+ ChatAgent,
+ ChatMessage,
+ ConcurrentBuilder,
+ Executor,
+ Role,
+ Workflow,
+ WorkflowContext,
+ handler,
+)
+from agent_framework.azure import AzureOpenAIChatClient
+from azure.identity import AzureCliCredential
+
+"""
+Sample: Concurrent Orchestration with Participant Factories and a Custom Aggregator
+
+Build a concurrent workflow with ConcurrentBuilder that fans out one prompt to
+multiple domain agents and fans in their responses.
+
+Override the default aggregator with a custom Executor class that uses
+AzureOpenAIChatClient.get_response() to synthesize a concise, consolidated summary
+from the experts' outputs.
+
+All participants and the aggregator are created via factory functions that return
+their respective ChatAgent or Executor instances.
+
+Using participant factories allows you to set up proper state isolation between workflow
+instances created by the same builder. This is particularly useful when you need to handle
+requests or tasks in parallel with stateful participants.
+
+Demonstrates:
+- ConcurrentBuilder().register_participants([...]).register_aggregator(factory)
+- Fan-out to agents and fan-in at an aggregator
+- Aggregation implemented via an LLM call (chat_client.get_response)
+- Workflow output yielded with the synthesized summary string
+
+Prerequisites:
+- Azure OpenAI configured for AzureOpenAIChatClient (az login + required env vars)
+"""
+
+
+def create_researcher() -> ChatAgent:
+ """Factory function to create a researcher agent instance."""
+ return AzureOpenAIChatClient(credential=AzureCliCredential()).create_agent(
+ instructions=(
+ "You're an expert market and product researcher. Given a prompt, provide concise, factual insights,"
+ " opportunities, and risks."
+ ),
+ name="researcher",
+ )
+
+
+def create_marketer() -> ChatAgent:
+ """Factory function to create a marketer agent instance."""
+ return AzureOpenAIChatClient(credential=AzureCliCredential()).create_agent(
+ instructions=(
+ "You're a creative marketing strategist. Craft compelling value propositions and target messaging"
+ " aligned to the prompt."
+ ),
+ name="marketer",
+ )
+
+
+def create_legal() -> ChatAgent:
+ """Factory function to create a legal/compliance agent instance."""
+ return AzureOpenAIChatClient(credential=AzureCliCredential()).create_agent(
+ instructions=(
+ "You're a cautious legal/compliance reviewer. Highlight constraints, disclaimers, and policy concerns"
+ " based on the prompt."
+ ),
+ name="legal",
+ )
+
+
+class SummarizationExecutor(Executor):
+ """Custom aggregator executor that synthesizes expert outputs into a concise summary."""
+
+ def __init__(self) -> None:
+ super().__init__(id="summarization_executor")
+ self.chat_client = AzureOpenAIChatClient(credential=AzureCliCredential())
+
+ @handler
+ async def summarize_results(self, results: list[Any], ctx: WorkflowContext[Never, str]) -> None:
+ expert_sections: list[str] = []
+ for r in results:
+ try:
+ messages = getattr(r.agent_run_response, "messages", [])
+ final_text = messages[-1].text if messages and hasattr(messages[-1], "text") else "(no content)"
+ expert_sections.append(f"{getattr(r, 'executor_id', 'expert')}:\n{final_text}")
+ except Exception as e:
+ expert_sections.append(f"{getattr(r, 'executor_id', 'expert')}: (error: {type(e).__name__}: {e})")
+
+ # Ask the model to synthesize a concise summary of the experts' outputs
+ system_msg = ChatMessage(
+ Role.SYSTEM,
+ text=(
+ "You are a helpful assistant that consolidates multiple domain expert outputs "
+ "into one cohesive, concise summary with clear takeaways. Keep it under 200 words."
+ ),
+ )
+ user_msg = ChatMessage(Role.USER, text="\n\n".join(expert_sections))
+
+ response = await self.chat_client.get_response([system_msg, user_msg])
+
+ await ctx.yield_output(response.messages[-1].text if response.messages else "")
+
+
+async def run_workflow(workflow: Workflow, query: str) -> None:
+ events = await workflow.run(query)
+ outputs = events.get_outputs()
+
+ if outputs:
+ print(outputs[0]) # Get the first (and typically only) output
+ else:
+ raise RuntimeError("No outputs received from the workflow.")
+
+
+async def main() -> None:
+ # Create a concurrent builder with participant factories and a custom aggregator
+ # - register_participants([...]) accepts factory functions that return
+ # AgentProtocol (agents) or Executor instances.
+ # - register_aggregator(...) takes a factory function that returns an Executor instance.
+ concurrent_builder = (
+ ConcurrentBuilder()
+ .register_participants([create_researcher, create_marketer, create_legal])
+ .register_aggregator(SummarizationExecutor)
+ )
+
+ # Build workflow_a
+ workflow_a = concurrent_builder.build()
+
+ # Run workflow_a
+ # Context is maintained across runs
+ print("=== First Run on workflow_a ===")
+ await run_workflow(workflow_a, "We are launching a new budget-friendly electric bike for urban commuters.")
+ print("\n=== Second Run on workflow_a ===")
+ await run_workflow(workflow_a, "Refine your response to focus on the California market.")
+
+ # Build workflow_b
+ # This will create new instances of all participants and the aggregator
+ # The agents will also get new threads
+ workflow_b = concurrent_builder.build()
+ # Run workflow_b
+ # Context is not maintained across instances
+ # Should not expect mentions of electric bikes in the results
+ print("\n=== First Run on workflow_b ===")
+ await run_workflow(workflow_b, "Refine your response to focus on the California market.")
+
+ """
+ Sample Output:
+
+ === First Run on workflow_a ===
+ The budget-friendly electric bike market is poised for significant growth, driven by urbanization, ...
+
+ === Second Run on workflow_a ===
+ Launching a budget-friendly electric bike in California presents significant opportunities, driven ...
+
+ === First Run on workflow_b ===
+ To successfully penetrate the California market, consider these tailored strategies focused on ...
+ """
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/python/samples/getting_started/workflows/orchestration/handoff_with_code_interpreter_file.py b/python/samples/getting_started/workflows/orchestration/handoff_with_code_interpreter_file.py
new file mode 100644
index 0000000000..cec49e8634
--- /dev/null
+++ b/python/samples/getting_started/workflows/orchestration/handoff_with_code_interpreter_file.py
@@ -0,0 +1,241 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+"""
+Handoff Workflow with Code Interpreter File Generation Sample
+
+This sample demonstrates retrieving file IDs from code interpreter output
+in a handoff workflow context. A triage agent routes to a code specialist
+that generates a text file, and we verify the file_id is captured correctly
+from the streaming AgentRunUpdateEvent events.
+
+Verifies GitHub issue #2718: files generated by code interpreter in
+HandoffBuilder workflows can be properly retrieved.
+
+Toggle USE_V2_CLIENT to switch between:
+ - V1: AzureAIAgentClient (azure-ai-agents SDK)
+ - V2: AzureAIClient (azure-ai-projects 2.x with Responses API)
+
+IMPORTANT: When using V2 AzureAIClient with HandoffBuilder, each agent must
+have its own client instance. The V2 client binds to a single server-side
+agent name, so sharing a client between agents causes routing issues.
+
+Prerequisites:
+ - `az login` (Azure CLI authentication)
+ - V1: AZURE_AI_AGENT_PROJECT_CONNECTION_STRING
+ - V2: AZURE_AI_PROJECT_ENDPOINT, AZURE_AI_MODEL_DEPLOYMENT_NAME
+"""
+
+import asyncio
+from collections.abc import AsyncIterable
+from collections.abc import AsyncIterator
+from contextlib import asynccontextmanager
+
+from agent_framework import (
+ AgentRunUpdateEvent,
+ ChatAgent,
+ HandoffBuilder,
+ HandoffUserInputRequest,
+ HostedCodeInterpreterTool,
+ HostedFileContent,
+ RequestInfoEvent,
+ TextContent,
+ WorkflowEvent,
+ WorkflowRunState,
+ WorkflowStatusEvent,
+)
+from azure.identity.aio import AzureCliCredential
+
+# Toggle between V1 (AzureAIAgentClient) and V2 (AzureAIClient)
+USE_V2_CLIENT = False
+
+
+async def _drain(stream: AsyncIterable[WorkflowEvent]) -> list[WorkflowEvent]:
+ """Collect all events from an async stream."""
+ return [event async for event in stream]
+
+
+def _handle_events(events: list[WorkflowEvent]) -> tuple[list[RequestInfoEvent], list[str]]:
+ """Process workflow events and extract file IDs and pending requests.
+
+ Returns:
+ Tuple of (pending_requests, file_ids_found)
+ """
+ requests: list[RequestInfoEvent] = []
+ file_ids: list[str] = []
+
+ for event in events:
+ if isinstance(event, WorkflowStatusEvent):
+ if event.state in {WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS}:
+ print(f"[status] {event.state.name}")
+
+ elif isinstance(event, RequestInfoEvent):
+ if isinstance(event.data, HandoffUserInputRequest):
+ print("\n=== Conversation So Far ===")
+ for msg in event.data.conversation:
+ speaker = msg.author_name or msg.role.value
+ text = msg.text or ""
+ txt = text[:200] + "..." if len(text) > 200 else text
+ print(f"- {speaker}: {txt}")
+ print("===========================\n")
+ requests.append(event)
+
+ elif isinstance(event, AgentRunUpdateEvent):
+ update = event.data
+ if update is None:
+ continue
+ for content in update.contents:
+ if isinstance(content, HostedFileContent):
+ file_ids.append(content.file_id)
+ print(f"[Found HostedFileContent: file_id={content.file_id}]")
+ elif isinstance(content, TextContent) and content.annotations:
+ for annotation in content.annotations:
+ if hasattr(annotation, "file_id") and annotation.file_id:
+ file_ids.append(annotation.file_id)
+ print(f"[Found file annotation: file_id={annotation.file_id}]")
+
+ return requests, file_ids
+
+
+@asynccontextmanager
+async def create_agents_v1(credential: AzureCliCredential) -> AsyncIterator[tuple[ChatAgent, ChatAgent]]:
+ """Create agents using V1 AzureAIAgentClient."""
+ from agent_framework.azure import AzureAIAgentClient
+
+ async with AzureAIAgentClient(credential=credential) as client:
+ triage = client.create_agent(
+ name="triage_agent",
+ instructions=(
+ "You are a triage agent. Route code-related requests to the code_specialist. "
+ "When the user asks to create or generate files, hand off to code_specialist "
+ "by calling handoff_to_code_specialist."
+ ),
+ )
+
+ code_specialist = client.create_agent(
+ name="code_specialist",
+ instructions=(
+ "You are a Python code specialist. Use the code interpreter to execute Python code "
+ "and create files when requested. Always save files to /mnt/data/ directory."
+ ),
+ tools=[HostedCodeInterpreterTool()],
+ )
+
+ yield triage, code_specialist
+
+
+@asynccontextmanager
+async def create_agents_v2(credential: AzureCliCredential) -> AsyncIterator[tuple[ChatAgent, ChatAgent]]:
+ """Create agents using V2 AzureAIClient.
+
+ Each agent needs its own client instance because the V2 client binds
+ to a single server-side agent name.
+ """
+ from agent_framework.azure import AzureAIClient
+
+ async with (
+ AzureAIClient(credential=credential) as triage_client,
+ AzureAIClient(credential=credential) as code_client,
+ ):
+ triage = triage_client.create_agent(
+ name="TriageAgent",
+ instructions=(
+ "You are a triage agent. Your ONLY job is to route requests to the appropriate specialist. "
+ "For code or file creation requests, call handoff_to_CodeSpecialist immediately. "
+ "Do NOT try to complete tasks yourself. Just hand off."
+ ),
+ )
+
+ code_specialist = code_client.create_agent(
+ name="CodeSpecialist",
+ instructions=(
+ "You are a Python code specialist. You have access to a code interpreter tool. "
+ "Use the code interpreter to execute Python code and create files. "
+ "Always save files to /mnt/data/ directory. "
+ "Do NOT discuss handoffs or routing - just complete the coding task directly."
+ ),
+ tools=[HostedCodeInterpreterTool()],
+ )
+
+ yield triage, code_specialist
+
+
+async def main() -> None:
+ """Run a simple handoff workflow with code interpreter file generation."""
+ client_version = "V2 (AzureAIClient)" if USE_V2_CLIENT else "V1 (AzureAIAgentClient)"
+ print(f"=== Handoff Workflow with Code Interpreter File Generation [{client_version}] ===\n")
+
+ async with AzureCliCredential() as credential:
+ create_agents = create_agents_v2 if USE_V2_CLIENT else create_agents_v1
+
+ async with create_agents(credential) as (triage, code_specialist):
+ workflow = (
+ HandoffBuilder()
+ .participants([triage, code_specialist])
+ .set_coordinator(triage)
+ .with_termination_condition(lambda conv: sum(1 for msg in conv if msg.role.value == "user") >= 2)
+ .build()
+ )
+
+ user_inputs = [
+ "Please create a text file called hello.txt with 'Hello from handoff workflow!' inside it.",
+ "exit",
+ ]
+ input_index = 0
+ all_file_ids: list[str] = []
+
+ print(f"User: {user_inputs[0]}")
+ events = await _drain(workflow.run_stream(user_inputs[0]))
+ requests, file_ids = _handle_events(events)
+ all_file_ids.extend(file_ids)
+ input_index += 1
+
+ while requests:
+ request = requests[0]
+ if input_index >= len(user_inputs):
+ break
+ user_input = user_inputs[input_index]
+ print(f"\nUser: {user_input}")
+
+ responses = {request.request_id: user_input}
+ events = await _drain(workflow.send_responses_streaming(responses))
+ requests, file_ids = _handle_events(events)
+ all_file_ids.extend(file_ids)
+ input_index += 1
+
+ print("\n" + "=" * 50)
+ if all_file_ids:
+ print(f"SUCCESS: Found {len(all_file_ids)} file ID(s) in handoff workflow:")
+ for fid in all_file_ids:
+ print(f" - {fid}")
+ else:
+ print("WARNING: No file IDs captured from the handoff workflow.")
+ print("=" * 50)
+
+ """
+ Sample Output:
+
+ User: Please create a text file called hello.txt with 'Hello from handoff workflow!' inside it.
+ [Found HostedFileContent: file_id=assistant-JT1sA...]
+
+ === Conversation So Far ===
+ - user: Please create a text file called hello.txt with 'Hello from handoff workflow!' inside it.
+ - triage_agent: I am handing off your request to create the text file "hello.txt" with the specified content to the code specialist. They will assist you shortly.
+ - code_specialist: The file "hello.txt" has been created with the content "Hello from handoff workflow!". You can download it using the link below:
+
+ [hello.txt](sandbox:/mnt/data/hello.txt)
+ ===========================
+
+ [status] IDLE_WITH_PENDING_REQUESTS
+
+ User: exit
+ [status] IDLE
+
+ ==================================================
+ SUCCESS: Found 1 file ID(s) in handoff workflow:
+ - assistant-JT1sA...
+ ==================================================
+ """ # noqa: E501
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/python/uv.lock b/python/uv.lock
index f227838350..04f3469175 100644
--- a/python/uv.lock
+++ b/python/uv.lock
@@ -89,7 +89,7 @@ wheels = [
[[package]]
name = "agent-framework"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
source = { virtual = "." }
dependencies = [
{ name = "agent-framework-core", extra = ["all"], marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -156,7 +156,7 @@ docs = [
[[package]]
name = "agent-framework-a2a"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
source = { editable = "packages/a2a" }
dependencies = [
{ name = "a2a-sdk", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -171,7 +171,7 @@ requires-dist = [
[[package]]
name = "agent-framework-ag-ui"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
source = { editable = "packages/ag-ui" }
dependencies = [
{ name = "ag-ui-protocol", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -201,7 +201,7 @@ provides-extras = ["dev"]
[[package]]
name = "agent-framework-anthropic"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
source = { editable = "packages/anthropic" }
dependencies = [
{ name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -216,7 +216,7 @@ requires-dist = [
[[package]]
name = "agent-framework-azure-ai"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
source = { editable = "packages/azure-ai" }
dependencies = [
{ name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -235,7 +235,7 @@ requires-dist = [
[[package]]
name = "agent-framework-azure-ai-search"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
source = { editable = "packages/azure-ai-search" }
dependencies = [
{ name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -250,7 +250,7 @@ requires-dist = [
[[package]]
name = "agent-framework-azurefunctions"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
source = { editable = "packages/azurefunctions" }
dependencies = [
{ name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -275,7 +275,7 @@ dev = [{ name = "types-python-dateutil", specifier = ">=2.9.0" }]
[[package]]
name = "agent-framework-chatkit"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
source = { editable = "packages/chatkit" }
dependencies = [
{ name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -290,7 +290,7 @@ requires-dist = [
[[package]]
name = "agent-framework-copilotstudio"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
source = { editable = "packages/copilotstudio" }
dependencies = [
{ name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -305,7 +305,7 @@ requires-dist = [
[[package]]
name = "agent-framework-core"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
source = { editable = "packages/core" }
dependencies = [
{ name = "azure-identity", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -356,7 +356,7 @@ requires-dist = [
{ name = "agent-framework-purview", marker = "extra == 'all'", editable = "packages/purview" },
{ name = "agent-framework-redis", marker = "extra == 'all'", editable = "packages/redis" },
{ name = "azure-identity", specifier = ">=1,<2" },
- { name = "mcp", extras = ["ws"], specifier = ">=1.13" },
+ { name = "mcp", extras = ["ws"], specifier = ">=1.23" },
{ name = "openai", specifier = ">=1.99.0" },
{ name = "opentelemetry-api", specifier = ">=1.39.0" },
{ name = "opentelemetry-exporter-otlp-proto-grpc", specifier = ">=1.39.0" },
@@ -371,7 +371,7 @@ provides-extras = ["all"]
[[package]]
name = "agent-framework-declarative"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
source = { editable = "packages/declarative" }
dependencies = [
{ name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -396,7 +396,7 @@ dev = [{ name = "types-pyyaml" }]
[[package]]
name = "agent-framework-devui"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
source = { editable = "packages/devui" }
dependencies = [
{ name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -430,7 +430,7 @@ provides-extras = ["dev", "all"]
[[package]]
name = "agent-framework-lab"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
source = { editable = "packages/lab" }
dependencies = [
{ name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -521,7 +521,7 @@ dev = [
[[package]]
name = "agent-framework-mem0"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
source = { editable = "packages/mem0" }
dependencies = [
{ name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -536,7 +536,7 @@ requires-dist = [
[[package]]
name = "agent-framework-purview"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
source = { editable = "packages/purview" }
dependencies = [
{ name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -553,7 +553,7 @@ requires-dist = [
[[package]]
name = "agent-framework-redis"
-version = "1.0.0b251209"
+version = "1.0.0b251211"
source = { editable = "packages/redis" }
dependencies = [
{ name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },