diff --git a/src/agents/mcp/server.py b/src/agents/mcp/server.py index 4c0b7b5fe9..bc08ffaa49 100644 --- a/src/agents/mcp/server.py +++ b/src/agents/mcp/server.py @@ -93,6 +93,7 @@ def __init__( require_approval: RequireApprovalSetting = None, failure_error_function: ToolErrorFunction | None | _UnsetType = _UNSET, tool_meta_resolver: MCPToolMetaResolver | None = None, + tool_name_prefix: str | None = None, ): """ Args: @@ -110,6 +111,9 @@ def __init__( SDK default) will be used. tool_meta_resolver: Optional callable that produces MCP request metadata (`_meta`) for tool calls. It is invoked by the Agents SDK before calling `call_tool`. + tool_name_prefix: Optional prefix to add to tool names from this server. Useful for + avoiding name collisions when multiple MCP servers provide tools with the same name. + For example, with prefix "filesystem", tool "read_file" becomes "filesystem_read_file". """ self.use_structured_content = use_structured_content self._needs_approval_policy = self._normalize_needs_approval( @@ -117,6 +121,7 @@ def __init__( ) self._failure_error_function = failure_error_function self.tool_meta_resolver = tool_meta_resolver + self.tool_name_prefix = tool_name_prefix @abc.abstractmethod async def connect(self): @@ -370,6 +375,23 @@ async def _apply_tool_filter( raise UserError("run_context and agent are required for dynamic tool filtering") return await self._apply_dynamic_tool_filter(tools, run_context, agent) + def _apply_tool_name_prefix(self, tools: list[MCPTool]) -> list[MCPTool]: + """Apply tool name prefix to avoid collisions between multiple MCP servers.""" + if not self.tool_name_prefix: + return tools + + prefixed_tools = [] + for tool in tools: + # Copy rather than construct a bare MCPTool so every other field + # (title, outputSchema, annotations, meta) is preserved instead + # of being silently dropped. + prefixed_tool = tool.model_copy( + update={"name": f"{self.tool_name_prefix}_{tool.name}"} + ) + prefixed_tools.append(prefixed_tool) + + return prefixed_tools + def _apply_static_tool_filter( self, tools: 
list[MCPTool], static_filter: ToolFilterStatic ) -> list[MCPTool]: @@ -582,6 +604,11 @@ async def list_tools( filtered_tools = tools if self.tool_filter is not None: filtered_tools = await self._apply_tool_filter(filtered_tools, run_context, agent) + + # Apply tool name prefix if configured + if self.tool_name_prefix: + filtered_tools = self._apply_tool_name_prefix(filtered_tools) + return filtered_tools except httpx.HTTPStatusError as e: status_code = e.response.status_code @@ -606,12 +633,17 @@ async def call_tool( session = self.session assert session is not None + # Strip prefix if configured and present + actual_tool_name = tool_name + if self.tool_name_prefix and tool_name.startswith(f"{self.tool_name_prefix}_"): + actual_tool_name = tool_name[len(self.tool_name_prefix) + 1:] + try: - self._validate_required_parameters(tool_name=tool_name, arguments=arguments) + self._validate_required_parameters(tool_name=actual_tool_name, arguments=arguments) if meta is None: - return await self._run_with_retries(lambda: session.call_tool(tool_name, arguments)) + return await self._run_with_retries(lambda: session.call_tool(actual_tool_name, arguments)) return await self._run_with_retries( - lambda: session.call_tool(tool_name, arguments, meta=meta) + lambda: session.call_tool(actual_tool_name, arguments, meta=meta) ) except httpx.HTTPStatusError as e: status_code = e.response.status_code diff --git a/src/agents/models/chatcmpl_converter.py b/src/agents/models/chatcmpl_converter.py index b7a566faf0..5a5028fdc9 100644 --- a/src/agents/models/chatcmpl_converter.py +++ b/src/agents/models/chatcmpl_converter.py @@ -415,6 +415,37 @@ def extract_all_content( raise UserError(f"Unknown content: {c}") return out + @classmethod + def _supports_reasoning_content( + cls, + model: str, + item_model: str | None, + item_provider_data: dict | None, + ) -> bool: + """Check if the model supports reasoning_content field in assistant messages. 
+ + Supported models: + - DeepSeek: deepseek-* + - Zhipu GLM: glm-*, zhipu-* + - Moonshot Kimi: kimi-*, moonshot-* + """ + # Models that require/support reasoning_content + reasoning_models = ("deepseek", "glm", "zhipu", "kimi", "moonshot") + + model_lower = model.lower() + is_reasoning_model = any(m in model_lower for m in reasoning_models) + + if not is_reasoning_model: + return False + + # Check if item also from reasoning model + if item_provider_data == {}: + return True # Backward compatibility + if item_model: + item_model_lower = item_model.lower() + return any(m in item_model_lower for m in reasoning_models) + return False # provider_data recorded but no item model: keep the original strict check + @classmethod def items_to_messages( cls, @@ -740,16 +771,13 @@ def ensure_assistant_message() -> ChatCompletionAssistantMessageParam: # This preserves the original behavior pending_thinking_blocks = reconstructed_thinking_blocks - # DeepSeek requires reasoning_content field in assistant messages with tool calls - # Items may not all originate from DeepSeek, so need to check for model match. + # DeepSeek/GLM/Kimi require reasoning_content field in assistant messages with tool calls + # Items may not all originate from the same model, so need to check for model match. # For backward compatibility, if provider_data is missing, ignore the check. + # Supported models: deepseek, zhipu/glm, moonshot/kimi elif ( model - and "deepseek" in model.lower() - and ( - (item_model and "deepseek" in item_model.lower()) - or item_provider_data == {} - ) + and cls._supports_reasoning_content(model, item_model, item_provider_data) ): summary_items = reasoning_item.get("summary", []) if summary_items: