# pylint: disable=line-too-long,useless-suppression
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------

"""
DESCRIPTION:
    This sample demonstrates how to directly interact with MCP (Model Context Protocol) tools
    using the low-level MCP client library to connect to the Foundry Project's MCP tools API:
    {AZURE_AI_PROJECT_ENDPOINT}/mcp_tools?api-version=2025-05-15-preview

    For agent-based MCP tool usage, see samples in samples/agents/tools/sample_agent_mcp.py
    and related files in that directory.

    WORKFLOW:
    This sample demonstrates a typical MCP client workflow:
    1. Establish connection to the Foundry Project MCP endpoint using ClientSession
    2. Initialize the session and discover available tools
    3. Invoke tools programmatically with specific arguments and metadata
    4. Process and save tool outputs (e.g., writing image generation results to a file)
    5. Chain multiple tool calls together (code interpreter -> image generation -> file search)

USAGE:
    python sample_mcp_tool_async.py

    Before running the sample:

    pip install "azure-ai-projects>=2.0.0b1" azure-identity python-dotenv mcp

    Set these environment variables with your own values:
    1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview
       page of your Microsoft Foundry portal.
    2) IMAGE_GEN_DEPLOYMENT_NAME - The deployment name of the image generation model, as found under the "Name" column in
       the "Models + endpoints" tab in your Microsoft Foundry project.
    3) (Optional) LOG_LEVEL - Logging level for HTTP client debugging. Valid values:
       - CRITICAL or 50 - Suppresses all logs except critical errors
       - FATAL - same as CRITICAL
       - ERROR or 40 - Shows errors only
       - WARNING or WARN or 30 - Shows warnings and errors
       - INFO or 20 - Shows informational messages, warnings, and errors
       - DEBUG or 10 - Shows detailed HTTP requests/responses and all other logs
       - NOTSET or 0 - Uses parent logger configuration
"""
| 45 | + |
| 46 | +import asyncio |
| 47 | +import base64 |
| 48 | +import os |
| 49 | +import logging |
| 50 | +from dotenv import load_dotenv |
| 51 | +from azure.ai.projects.aio import AIProjectClient |
| 52 | +from azure.identity.aio import DefaultAzureCredential |
| 53 | +from mcp import ClientSession |
| 54 | +from mcp.types import ImageContent |
| 55 | +from mcp.client.streamable_http import streamablehttp_client |
| 56 | + |
# Pull environment variables from a local .env file, if one exists
load_dotenv()

# Optional HTTP-client debug logging, driven by the LOG_LEVEL environment
# variable (e.g. LOG_LEVEL=DEBUG shows full HTTP requests and responses).
log_level = os.getenv("LOG_LEVEL", "").upper()
if log_level:
    # Resolve the name once; unknown names fall back to CRITICAL
    resolved_level = getattr(logging, log_level, logging.CRITICAL)
    logging.basicConfig(level=resolved_level)
    # Mirror the level on the httpx logger so HTTP traffic is logged too
    logging.getLogger("httpx").setLevel(resolved_level)

# Required: the Foundry Project endpoint. Fails fast with KeyError if unset.
endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]
| 68 | + |
| 69 | + |
async def main():
    """Connect to the Foundry Project MCP endpoint, discover the available
    tools, and chain three tool calls: code interpreter -> image generation
    -> file search (backed by a freshly-created vector store).

    Reads module-level ``endpoint`` and the environment variable
    IMAGE_GEN_DEPLOYMENT_NAME; writes the generated image to ``puppy.png``
    in the current working directory.
    """

    async with (
        DefaultAzureCredential() as credential,
        AIProjectClient(endpoint=endpoint, credential=credential) as project_client,
        project_client.get_openai_client() as openai_client,
        # Bearer token for the MCP endpoint; fetched once at connection time
        streamablehttp_client(
            url=f"{endpoint}/mcp_tools?api-version=2025-05-15-preview",
            headers={"Authorization": f"Bearer {(await credential.get_token('https://ai.azure.com')).token}"},
        ) as (read_stream, write_stream, _),
        ClientSession(read_stream, write_stream) as session,
    ):

        # The MCP handshake must complete before any other session calls
        await session.initialize()

        # Discover available tools and print each tool's input schema
        tools = await session.list_tools()
        print(f"Available tools: {[tool.name for tool in tools.tools]}")
        for tool in tools.tools:
            print(f"\n\nTool Name: {tool.name}, Input Schema: {tool.inputSchema}")

        # Run the code interpreter tool
        code_interpreter_result = await session.call_tool(
            name="code_interpreter",
            arguments={"code": "print('Hello from Microsoft Foundry MCP Code Interpreter tool!')"},
        )
        print(f"\n\nCode Interpreter Output: {code_interpreter_result.content}")

        # Run the image_generation tool; the model deployment is selected
        # via request metadata rather than tool arguments
        image_generation_result = await session.call_tool(
            name="image_generation",
            arguments={"prompt": "Draw a cute puppy riding a skateboard"},
            meta={"imagegen_model_deployment_name": os.getenv("IMAGE_GEN_DEPLOYMENT_NAME", "")},
        )

        # Save the image output (base64-encoded data) to a local file
        if image_generation_result.content and isinstance(image_generation_result.content[0], ImageContent):
            print("\nDownloading generated image...")
            filename = "puppy.png"
            file_path = os.path.abspath(filename)
            with open(file_path, "wb") as f:
                f.write(base64.b64decode(image_generation_result.content[0].data))

        # Create a vector store to back the file_search tool
        vector_store = await openai_client.vector_stores.create(
            name="sample_vector_store",
        )

        # Open the sample asset with a context manager so the file handle is
        # always closed (the original bare open() leaked the handle).
        asset_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "./assets/product_info.md"))
        with open(asset_path, "rb") as asset_file:
            vector_store_file = await openai_client.vector_stores.files.upload_and_poll(
                vector_store_id=vector_store.id,
                file=asset_file,
            )

        print(f"\n\nUploaded file, file ID: {vector_store_file.id} to vector store ID: {vector_store.id}")

        # Call the file_search tool, scoped to the new vector store via metadata
        file_search_result = await session.call_tool(
            name="file_search",
            arguments={"queries": ["What feature does Smart Eyewear offer?"]},
            meta={"vector_store_ids": [vector_store.id]},
        )
        print(f"\n\nFile Search Output: {file_search_result.content}")
| 139 | + |
if __name__ == "__main__":
    # Script entry point: drive the async workflow to completion
    asyncio.run(main())
0 commit comments