diff --git a/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/faq.md b/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/faq.md
index 26c52628d380..66034743af35 100644
--- a/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/faq.md
+++ b/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/faq.md
@@ -15,29 +15,67 @@ A: You can specify the directory where files are stored by setting the `--appdir
Yes. AutoGen standardizes on the openai model api format, and you can use any api server that offers an openai compliant endpoint.
-AutoGen Studio is based on declaritive specifications which applies to models as well. Agents can include a model_client field which specifies the model endpoint details including `model`, `api_key`, `base_url`, `model type`.
+AutoGen Studio is based on declarative specifications, which apply to models as well. Agents can include a model_client field which specifies the model endpoint details, including `model`, `api_key`, `base_url`, and `model type`. Note that you can define your [model client](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/components/model-clients.html) in Python and dump it to a JSON file for use in AutoGen Studio.
-An example of the openai model client is shown below:
+In the following sample, we will define an OpenAI, an Azure OpenAI, and a local model client in Python and dump them to JSON.
+
+```python
+from autogen_ext.models.openai import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient
+from autogen_core.models import ModelInfo
+
+model_client = OpenAIChatCompletionClient(
+    model="gpt-4o-mini",
+)
+print(model_client.dump_component().model_dump_json())
+
+az_model_client = AzureOpenAIChatCompletionClient(
+    azure_deployment="{your-azure-deployment}",
+    model="gpt-4o",
+    api_version="2024-06-01",
+    azure_endpoint="https://{your-custom-endpoint}.openai.azure.com/",
+    api_key="sk-...",
+)
+print(az_model_client.dump_component().model_dump_json())
+
+mistral_vllm_model = OpenAIChatCompletionClient(
+    model="TheBloke/Mistral-7B-Instruct-v0.2-GGUF",
+    base_url="http://localhost:1234/v1",
+    model_info=ModelInfo(vision=False, function_calling=True, json_output=False, family="unknown"),
+)
+print(mistral_vllm_model.dump_component().model_dump_json())
+```
+
+OpenAI
```json
{
-  "model": "gpt-4o-mini",
-  "model_type": "OpenAIChatCompletionClient",
-  "api_key": "your-api-key"
+  "provider": "autogen_ext.models.openai.OpenAIChatCompletionClient",
+  "component_type": "model",
+  "version": 1,
+  "component_version": 1,
+  "description": "Chat completion client for OpenAI hosted models.",
+  "label": "OpenAIChatCompletionClient",
+  "config": { "model": "gpt-4o-mini" }
}
```
-An example of the azure openai model client is shown below:
+Azure OpenAI
```json
{
-  "model": "gpt-4o-mini",
-  "model_type": "AzureOpenAIChatCompletionClient",
-  "azure_deployment": "gpt-4o-mini",
-  "api_version": "2024-02-15-preview",
-  "azure_endpoint": "https://your-endpoint.openai.azure.com/",
-  "api_key": "your-api-key",
-  "component_type": "model"
+  "provider": "autogen_ext.models.openai.AzureOpenAIChatCompletionClient",
+  "component_type": "model",
+  "version": 1,
+  "component_version": 1,
+  "description": "Chat completion client for Azure OpenAI hosted models.",
+  "label": "AzureOpenAIChatCompletionClient",
+  "config": {
+    "model": "gpt-4o",
+    "api_key": "sk-...",
+    "azure_endpoint": "https://{your-custom-endpoint}.openai.azure.com/",
+    "azure_deployment": "{your-azure-deployment}",
"{your-azure-deployment}", + "api_version": "2024-06-01" + } } ``` @@ -45,21 +83,27 @@ Have a local model server like Ollama, vLLM or LMStudio that provide an OpenAI c ```json { - "model": "TheBloke/Mistral-7B-Instruct-v0.2-GGUF", - "model_type": "OpenAIChatCompletionClient", - "base_url": "http://localhost:1234/v1", - "api_version": "1.0", + "provider": "autogen_ext.models.openai.OpenAIChatCompletionClient", "component_type": "model", - "model_capabilities": { - "vision": false, - "function_calling": true, - "json_output": false + "version": 1, + "component_version": 1, + "description": "Chat completion client for OpenAI hosted models.", + "label": "OpenAIChatCompletionClient", + "config": { + "model": "TheBloke/Mistral-7B-Instruct-v0.2-GGUF", + "model_info": { + "vision": false, + "function_calling": true, + "json_output": false, + "family": "unknown" + }, + "base_url": "http://localhost:1234/v1" } } ``` ```{caution} -It is important that you add the `model_capabilities` field to the model client specification for custom models. This is used by the framework instantiate and use the model correctly. Also, the `AssistantAgent` and many other agents in AgentChat require the model to have the `function_calling` capability. +It is important that you add the `model_info` field to the model client specification for custom models. This is used by the framework instantiate and use the model correctly. Also, the `AssistantAgent` and many other agents in AgentChat require the model to have the `function_calling` capability. ``` ## Q: The server starts but I can't access the UI diff --git a/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/index.md b/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/index.md index 09de3f9ac14f..608adf78a093 100644 --- a/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/index.md +++ b/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/index.md @@ -12,7 +12,9 @@ myst: AutoGen Studio is a low-code interface built to help you rapidly prototype AI agents, enhance them with tools, compose them into teams and interact with them to accomplish tasks. It is built on [AutoGen AgentChat](https://microsoft.github.io/autogen) - a high-level API for building multi-agent applications. -![AutoGen Studio](https://media.githubusercontent.com/media/microsoft/autogen/refs/heads/main/python/packages/autogen-studio/docs/ags_screen.png) +> See a video tutorial on AutoGen Studio v0.4 (02/25) - [https://youtu.be/oum6EI7wohM](https://youtu.be/oum6EI7wohM) + +[![A Friendly Introduction to AutoGen Studio v0.4](https://img.youtube.com/vi/oum6EI7wohM/maxresdefault.jpg)](https://www.youtube.com/watch?v=oum6EI7wohM) Code for AutoGen Studio is on GitHub at [microsoft/autogen](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-studio) diff --git a/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/usage.md b/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/usage.md index e185a9128566..dd2cc6d1e727 100644 --- a/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/usage.md +++ b/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/usage.md @@ -10,7 +10,9 @@ myst: AutoGen Studio provides a Team Builder interface where developers can define multiple components and behaviors. Users can create teams, add agents to teams, attach tools and models to agents, and define team termination conditions. 
After defining a team, users can test it in the Playground view to accomplish various tasks through direct interaction. -![AutoGen Studio](https://media.githubusercontent.com/media/microsoft/autogen/refs/heads/main/python/packages/autogen-studio/docs/ags_screen.png) +> See a video tutorial on AutoGen Studio v0.4 (02/25) - [https://youtu.be/oum6EI7wohM](https://youtu.be/oum6EI7wohM) + +[![A Friendly Introduction to AutoGen Studio v0.4](https://img.youtube.com/vi/oum6EI7wohM/maxresdefault.jpg)](https://www.youtube.com/watch?v=oum6EI7wohM) ## Declarative Specification of Componenents @@ -100,8 +102,6 @@ This example shows a team with a single agent, using the `RoundRobinGroupChat` t ## Building an Agent Team -
-
AutoGen Studio integrates closely with all component abstractions provided by AutoGen AgentChat, including {py:class}`~autogen_agentchat.teams`, {py:class}`~autogen_agentchat.agents`, {py:class}`~autogen_core.models`, {py:class}`~autogen_core.tools`, and termination {py:class}`~autogen_agentchat.conditions`. @@ -117,6 +117,8 @@ Team Builder Operations: - Agents: Add models and tools - Save team configurations +Note: For each node in the visual builder, you can click on the edit icon (top right) to view and edit the JSON configuration. + ## Gallery - Sharing and Reusing Components A Gallery is a collection of components - teams, agents, models, tools, and terminations - that can be shared and reused across projects. diff --git a/python/packages/autogen-studio/autogenstudio/gallery/builder.py b/python/packages/autogen-studio/autogenstudio/gallery/builder.py index 7ef14c2061b6..0d9a36f83471 100644 --- a/python/packages/autogen-studio/autogenstudio/gallery/builder.py +++ b/python/packages/autogen-studio/autogenstudio/gallery/builder.py @@ -148,7 +148,7 @@ def create_default_gallery() -> Gallery: mistral_vllm_model = OpenAIChatCompletionClient( model="TheBloke/Mistral-7B-Instruct-v0.2-GGUF", base_url="http://localhost:1234/v1", - model_info=ModelInfo(vision=False, function_calling=True, json_output=False), + model_info=ModelInfo(vision=False, function_calling=True, json_output=False, family="unknown"), ) builder.add_model( mistral_vllm_model.dump_component(), @@ -236,6 +236,7 @@ def create_default_gallery() -> Gallery: model_client=base_model, termination_condition=web_termination, ) + builder.add_team( websurfer_team.dump_component(), label="Web Agent Team (Operator)", @@ -256,8 +257,8 @@ def create_default_gallery() -> Gallery: builder.add_tool( tools.fetch_webpage_tool.dump_component(), - label="Webpage Generation Tool", - description="A tool that generates a webpage from a list of images. Requires beautifulsoup4 html2text library to function.", + label="Fetch Webpage Tool", + description="A tool that fetches the content of a webpage and converts it to markdown. Requires the requests and beautifulsoup4 library to function.", ) builder.add_tool( @@ -272,6 +273,83 @@ def create_default_gallery() -> Gallery: description="A tool that performs Google searches using the Google Custom Search API. Requires the requests library, [GOOGLE_API_KEY, GOOGLE_CSE_ID] to be set, env variable to function.", ) + # Create deep research agent + model_client = OpenAIChatCompletionClient(model="gpt-4o", temperature=0.7) + + research_assistant = AssistantAgent( + name="research_assistant", + description="A research assistant that performs web searches and analyzes information", + model_client=model_client, + tools=[tools.google_search_tool, tools.fetch_webpage_tool], + system_message="""You are a research assistant focused on finding accurate information. + Use the google_search tool to find relevant information. + Break down complex queries into specific search terms. + Always verify information across multiple sources when possible. + When you find relevant information, explain why it's relevant and how it connects to the query. When you get feedback from the a verifier agent, use your tools to act on the feedback and make progress.""", + ) + + verifier = AssistantAgent( + name="verifier", + description="A verification specialist who ensures research quality and completeness", + model_client=model_client, + system_message="""You are a research verification specialist. + Your role is to: + 1. 
Verify that search queries are effective and suggest improvements if needed + 2. Explore drill downs where needed e.g, if the answer is likely in a link in the returned search results, suggest clicking on the link + 3. Suggest additional angles or perspectives to explore. Be judicious in suggesting new paths to avoid scope creep or wasting resources, if the task appears to be addressed and we can provide a report, do this and respond with "TERMINATE". + 4. Track progress toward answering the original question + 5. When the research is complete, provide a detailed summary in markdown format. For incomplete research, end your message with "CONTINUE RESEARCH". For complete research, end your message with APPROVED. + Your responses should be structured as: + - Progress Assessment + - Gaps/Issues (if any) + - Suggestions (if needed) + - Next Steps or Final Summary""", + ) + + summary_agent = AssistantAgent( + name="summary_agent", + description="A summary agent that provides a detailed markdown summary of the research as a report to the user.", + model_client=model_client, + system_message="""You are a summary agent. Your role is to provide a detailed markdown summary of the research as a report to the user. Your report should have a reasonable title that matches the research question and should summarize the key details in the results found in natural an actionable manner. The main results/answer should be in the first paragraph. + Your report should end with the word "TERMINATE" to signal the end of the conversation.""", + ) + + termination = TextMentionTermination("TERMINATE") | MaxMessageTermination(max_messages=30) + + selector_prompt = """You are coordinating a research team by selecting the team member to speak/act next. The following team member roles are available: + {roles}. + The research_assistant performs searches and analyzes information. + The verifier evaluates progress and ensures completeness. + The summary_agent provides a detailed markdown summary of the research as a report to the user. + + Given the current context, select the most appropriate next speaker. + The research_assistant should search and analyze. + The verifier should evaluate progress and guide the research (select this role is there is a need to verify/evaluate progress). You should ONLY select the summary_agent role if the research is complete and it is time to generate a report. + + Base your selection on: + 1. Current stage of research + 2. Last speaker's findings or suggestions + 3. Need for verification vs need for new information + Read the following conversation. Then select the next role from {participants} to play. Only return the role. + + {history} + + Read the above conversation. Then select the next role from {participants} to play. 
ONLY RETURN THE ROLE.""" + + deep_research_team = SelectorGroupChat( + participants=[research_assistant, verifier, summary_agent], + model_client=model_client, + termination_condition=termination, + selector_prompt=selector_prompt, + allow_repeated_speaker=True, + ) + + builder.add_team( + deep_research_team.dump_component(), + label="Deep Research Team", + description="A team that performs deep research using web searches, verification, and summarization.", + ) + return builder.build() diff --git a/python/packages/autogen-studio/autogenstudio/version.py b/python/packages/autogen-studio/autogenstudio/version.py index 171811d227da..f1a865c952a5 100644 --- a/python/packages/autogen-studio/autogenstudio/version.py +++ b/python/packages/autogen-studio/autogenstudio/version.py @@ -1,3 +1,3 @@ -VERSION = "0.4.0" +VERSION = "0.4.1" __version__ = VERSION APP_NAME = "autogenstudio" diff --git a/python/packages/autogen-studio/frontend/src/components/sidebar.tsx b/python/packages/autogen-studio/frontend/src/components/sidebar.tsx index 1aa1ba4abdb4..b01672d81829 100644 --- a/python/packages/autogen-studio/frontend/src/components/sidebar.tsx +++ b/python/packages/autogen-studio/frontend/src/components/sidebar.tsx @@ -220,7 +220,7 @@ const Sidebar = ({ link, meta, isMobile }: SidebarProps) => { ], }) } - className="group flex gap-x-3 rounded-md p-2 text-sm font-medium text-primary hover:text-accent hover:bg-secondary justify-center" + className="group hidden flex gap-x-3 rounded-md p-2 text-sm font-medium text-primary hover:text-accent hover:bg-secondary justify-center" > diff --git a/python/packages/autogen-studio/frontend/src/components/views/gallery/default_gallery.json b/python/packages/autogen-studio/frontend/src/components/views/gallery/default_gallery.json index 6572c57048e2..1a8b4e555435 100644 --- a/python/packages/autogen-studio/frontend/src/components/views/gallery/default_gallery.json +++ b/python/packages/autogen-studio/frontend/src/components/views/gallery/default_gallery.json @@ -1,10 +1,11 @@ { "id": "gallery_default", "name": "Default Component Gallery", + "url": null, "metadata": { "author": "AutoGen Team", - "created_at": "2025-02-01T17:12:38.179093", - "updated_at": "2025-02-01T17:12:38.701658", + "created_at": "2025-02-06T11:52:58.243877", + "updated_at": "2025-02-06T11:52:58.497032", "version": "1.0.0", "description": "A default gallery containing basic components for human-in-loop conversations", "tags": ["human-in-loop", "assistant", "web agents"], @@ -53,7 +54,7 @@ "description": "Create custom tools by wrapping standard Python functions.", "label": "FunctionTool", "config": { - "source_code": "def calculator(a: float, b: float, operator: str) -> str:\n try:\n if operator == \"+\":\n return str(a + b)\n elif operator == \"-\":\n return str(a - b)\n elif operator == \"*\":\n return str(a * b)\n elif operator == \"/\":\n if b == 0:\n return \"Error: Division by zero\"\n return str(a / b)\n else:\n return \"Error: Invalid operator. Please use +, -, *, or /\"\n except Exception as e:\n return f\"Error: {str(e)}\"\n", + "source_code": "def calculator(a: float, b: float, operator: str) -> str:\n try:\n if operator == \"+\":\n return str(a + b)\n elif operator == \"-\":\n return str(a - b)\n elif operator == \"*\":\n return str(a * b)\n elif operator == \"/\":\n if b == 0:\n return \"Error: Division by zero\"\n return str(a / b)\n else:\n return \"Error: Invalid operator. 
Please use +, -, *, or /\"\n except Exception as e:\n return f\"Error: {str(e)}\"\n", "name": "calculator", "description": "A simple calculator that performs basic arithmetic operations", "global_imports": [], @@ -250,6 +251,249 @@ "selector_prompt": "You are the cordinator of role play game. The following roles are available:\n{roles}. Given a task, the websurfer_agent will be tasked to address it by browsing the web and providing information. The assistant_agent will be tasked with verifying the information provided by the websurfer_agent and summarizing the information to present a final answer to the user. If the task needs assistance from a human user (e.g., providing feedback, preferences, or the task is stalled), you should select the user_proxy role to provide the necessary information.\n\nRead the following conversation. Then select the next role from {participants} to play. Only return the role.\n\n{history}\n\nRead the above conversation. Then select the next role from {participants} to play. Only return the role.", "allow_repeated_speaker": false } + }, + { + "provider": "autogen_agentchat.teams.SelectorGroupChat", + "component_type": "team", + "version": 1, + "component_version": 1, + "description": "A team that performs deep research using web searches, verification, and summarization.", + "label": "Deep Research Team", + "config": { + "participants": [ + { + "provider": "autogen_agentchat.agents.AssistantAgent", + "component_type": "agent", + "version": 1, + "component_version": 1, + "description": "An agent that provides assistance with tool use.", + "label": "AssistantAgent", + "config": { + "name": "research_assistant", + "model_client": { + "provider": "autogen_ext.models.openai.OpenAIChatCompletionClient", + "component_type": "model", + "version": 1, + "component_version": 1, + "description": "Chat completion client for OpenAI hosted models.", + "label": "OpenAIChatCompletionClient", + "config": { + "temperature": 0.7, + "model": "gpt-4o" + } + }, + "tools": [ + { + "provider": "autogen_core.tools.FunctionTool", + "component_type": "tool", + "version": 1, + "component_version": 1, + "description": "Create custom tools by wrapping standard Python functions.", + "label": "FunctionTool", + "config": { + "source_code": "async def google_search(\n query: str,\n num_results: int = 5,\n include_snippets: bool = True,\n include_content: bool = True,\n content_max_length: Optional[int] = 15000,\n language: str = \"en\",\n country: Optional[str] = None,\n safe_search: bool = True,\n) -> List[Dict[str, str]]:\n \"\"\"\n Perform a Google search using the Custom Search API and optionally fetch webpage content.\n\n Args:\n query: Search query string\n num_results: Number of results to return (max 10)\n include_snippets: Include result snippets in output\n include_content: Include full webpage content in markdown format\n content_max_length: Maximum length of webpage content (if included)\n language: Language code for search results (e.g., en, es, fr)\n country: Optional country code for search results (e.g., us, uk)\n safe_search: Enable safe search filtering\n\n Returns:\n List[Dict[str, str]]: List of search results, each containing:\n - title: Result title\n - link: Result URL\n - snippet: Result description (if include_snippets=True)\n - content: Webpage content in markdown (if include_content=True)\n \"\"\"\n api_key = os.getenv(\"GOOGLE_API_KEY\")\n cse_id = os.getenv(\"GOOGLE_CSE_ID\")\n\n if not api_key or not cse_id:\n raise ValueError(\"Missing required environment 
variables. Please set GOOGLE_API_KEY and GOOGLE_CSE_ID.\")\n\n num_results = min(max(1, num_results), 10)\n\n async def fetch_page_content(url: str, max_length: Optional[int] = 50000) -> str:\n \"\"\"Helper function to fetch and convert webpage content to markdown\"\"\"\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36\"}\n\n try:\n async with httpx.AsyncClient() as client:\n response = await client.get(url, headers=headers, timeout=10)\n response.raise_for_status()\n\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n # Remove script and style elements\n for script in soup([\"script\", \"style\"]):\n script.decompose()\n\n # Convert relative URLs to absolute\n for tag in soup.find_all([\"a\", \"img\"]):\n if tag.get(\"href\"):\n tag[\"href\"] = urljoin(url, tag[\"href\"])\n if tag.get(\"src\"):\n tag[\"src\"] = urljoin(url, tag[\"src\"])\n\n h2t = html2text.HTML2Text()\n h2t.body_width = 0\n h2t.ignore_images = False\n h2t.ignore_emphasis = False\n h2t.ignore_links = False\n h2t.ignore_tables = False\n\n markdown = h2t.handle(str(soup))\n\n if max_length and len(markdown) > max_length:\n markdown = markdown[:max_length] + \"\\n...(truncated)\"\n\n return markdown.strip()\n\n except Exception as e:\n return f\"Error fetching content: {str(e)}\"\n\n params = {\n \"key\": api_key,\n \"cx\": cse_id,\n \"q\": query,\n \"num\": num_results,\n \"hl\": language,\n \"safe\": \"active\" if safe_search else \"off\",\n }\n\n if country:\n params[\"gl\"] = country\n\n try:\n async with httpx.AsyncClient() as client:\n response = await client.get(\"https://www.googleapis.com/customsearch/v1\", params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n\n results = []\n if \"items\" in data:\n for item in data[\"items\"]:\n result = {\"title\": item.get(\"title\", \"\"), \"link\": item.get(\"link\", \"\")}\n if include_snippets:\n result[\"snippet\"] = item.get(\"snippet\", \"\")\n\n if include_content:\n result[\"content\"] = await fetch_page_content(result[\"link\"], max_length=content_max_length)\n\n results.append(result)\n\n return results\n\n except httpx.RequestError as e:\n raise ValueError(f\"Failed to perform search: {str(e)}\") from e\n except KeyError as e:\n raise ValueError(f\"Invalid API response format: {str(e)}\") from e\n except Exception as e:\n raise ValueError(f\"Error during search: {str(e)}\") from e\n", + "name": "google_search", + "description": "\n Perform Google searches using the Custom Search API with optional webpage content fetching.\n Requires GOOGLE_API_KEY and GOOGLE_CSE_ID environment variables to be set.\n ", + "global_imports": [ + { + "module": "typing", + "imports": ["List", "Dict", "Optional"] + }, + "os", + "httpx", + "html2text", + { + "module": "bs4", + "imports": ["BeautifulSoup"] + }, + { + "module": "urllib.parse", + "imports": ["urljoin"] + } + ], + "has_cancellation_support": false + } + }, + { + "provider": "autogen_core.tools.FunctionTool", + "component_type": "tool", + "version": 1, + "component_version": 1, + "description": "Create custom tools by wrapping standard Python functions.", + "label": "FunctionTool", + "config": { + "source_code": "async def fetch_webpage(\n url: str, include_images: bool = True, max_length: Optional[int] = None, headers: Optional[Dict[str, str]] = None\n) -> str:\n \"\"\"Fetch a webpage and convert it to markdown format.\n\n Args:\n url: The URL of the webpage to fetch\n include_images: Whether to include image references in the markdown\n max_length: 
Maximum length of the output markdown (if None, no limit)\n headers: Optional HTTP headers for the request\n\n Returns:\n str: Markdown version of the webpage content\n\n Raises:\n ValueError: If the URL is invalid or the page can't be fetched\n \"\"\"\n # Use default headers if none provided\n if headers is None:\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36\"}\n\n try:\n # Fetch the webpage\n async with httpx.AsyncClient() as client:\n response = await client.get(url, headers=headers, timeout=10)\n response.raise_for_status()\n\n # Parse HTML\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n # Remove script and style elements\n for script in soup([\"script\", \"style\"]):\n script.decompose()\n\n # Convert relative URLs to absolute\n for tag in soup.find_all([\"a\", \"img\"]):\n if tag.get(\"href\"):\n tag[\"href\"] = urljoin(url, tag[\"href\"])\n if tag.get(\"src\"):\n tag[\"src\"] = urljoin(url, tag[\"src\"])\n\n # Configure HTML to Markdown converter\n h2t = html2text.HTML2Text()\n h2t.body_width = 0 # No line wrapping\n h2t.ignore_images = not include_images\n h2t.ignore_emphasis = False\n h2t.ignore_links = False\n h2t.ignore_tables = False\n\n # Convert to markdown\n markdown = h2t.handle(str(soup))\n\n # Trim if max_length is specified\n if max_length and len(markdown) > max_length:\n markdown = markdown[:max_length] + \"\\n...(truncated)\"\n\n return markdown.strip()\n\n except httpx.RequestError as e:\n raise ValueError(f\"Failed to fetch webpage: {str(e)}\") from e\n except Exception as e:\n raise ValueError(f\"Error processing webpage: {str(e)}\") from e\n", + "name": "fetch_webpage", + "description": "Fetch a webpage and convert it to markdown format, with options for including images and limiting length", + "global_imports": [ + "os", + "html2text", + { + "module": "typing", + "imports": ["Optional", "Dict"] + }, + "httpx", + { + "module": "bs4", + "imports": ["BeautifulSoup"] + }, + { + "module": "html2text", + "imports": ["HTML2Text"] + }, + { + "module": "urllib.parse", + "imports": ["urljoin"] + } + ], + "has_cancellation_support": false + } + } + ], + "handoffs": [], + "model_context": { + "provider": "autogen_core.model_context.UnboundedChatCompletionContext", + "component_type": "chat_completion_context", + "version": 1, + "component_version": 1, + "description": "An unbounded chat completion context that keeps a view of the all the messages.", + "label": "UnboundedChatCompletionContext", + "config": {} + }, + "description": "A research assistant that performs web searches and analyzes information", + "system_message": "You are a research assistant focused on finding accurate information.\n Use the google_search tool to find relevant information.\n Break down complex queries into specific search terms.\n Always verify information across multiple sources when possible.\n When you find relevant information, explain why it's relevant and how it connects to the query. 
When you get feedback from the a verifier agent, use your tools to act on the feedback and make progress.", + "model_client_stream": false, + "reflect_on_tool_use": false, + "tool_call_summary_format": "{result}" + } + }, + { + "provider": "autogen_agentchat.agents.AssistantAgent", + "component_type": "agent", + "version": 1, + "component_version": 1, + "description": "An agent that provides assistance with tool use.", + "label": "AssistantAgent", + "config": { + "name": "verifier", + "model_client": { + "provider": "autogen_ext.models.openai.OpenAIChatCompletionClient", + "component_type": "model", + "version": 1, + "component_version": 1, + "description": "Chat completion client for OpenAI hosted models.", + "label": "OpenAIChatCompletionClient", + "config": { + "temperature": 0.7, + "model": "gpt-4o" + } + }, + "tools": [], + "handoffs": [], + "model_context": { + "provider": "autogen_core.model_context.UnboundedChatCompletionContext", + "component_type": "chat_completion_context", + "version": 1, + "component_version": 1, + "description": "An unbounded chat completion context that keeps a view of the all the messages.", + "label": "UnboundedChatCompletionContext", + "config": {} + }, + "description": "A verification specialist who ensures research quality and completeness", + "system_message": "You are a research verification specialist.\n Your role is to:\n 1. Verify that search queries are effective and suggest improvements if needed\n 2. Explore drill downs where needed e.g, if the answer is likely in a link in the returned search results, suggest clicking on the link\n 3. Suggest additional angles or perspectives to explore. Be judicious in suggesting new paths to avoid scope creep or wasting resources, if the task appears to be addressed and we can provide a report, do this and respond with \"TERMINATE\".\n 4. Track progress toward answering the original question\n 5. When the research is complete, provide a detailed summary in markdown format\n \n For incomplete research, end your message with \"CONTINUE RESEARCH\". \n For complete research, end your message with APPROVED.\n \n Your responses should be structured as:\n - Progress Assessment\n - Gaps/Issues (if any)\n - Suggestions (if needed)\n - Next Steps or Final Summary", + "model_client_stream": false, + "reflect_on_tool_use": false, + "tool_call_summary_format": "{result}" + } + }, + { + "provider": "autogen_agentchat.agents.AssistantAgent", + "component_type": "agent", + "version": 1, + "component_version": 1, + "description": "An agent that provides assistance with tool use.", + "label": "AssistantAgent", + "config": { + "name": "summary_agent", + "model_client": { + "provider": "autogen_ext.models.openai.OpenAIChatCompletionClient", + "component_type": "model", + "version": 1, + "component_version": 1, + "description": "Chat completion client for OpenAI hosted models.", + "label": "OpenAIChatCompletionClient", + "config": { + "temperature": 0.7, + "model": "gpt-4o" + } + }, + "tools": [], + "handoffs": [], + "model_context": { + "provider": "autogen_core.model_context.UnboundedChatCompletionContext", + "component_type": "chat_completion_context", + "version": 1, + "component_version": 1, + "description": "An unbounded chat completion context that keeps a view of the all the messages.", + "label": "UnboundedChatCompletionContext", + "config": {} + }, + "description": "A summary agent that provides a detailed markdown summary of the research as a report to the user.", + "system_message": "You are a summary agent. 
Your role is to provide a detailed markdown summary of the research as a report to the user. Your report should have a reasonable title that matches the research question and should summarize the key details in the results found in natural an actionable manner. The main results/answer should be in the first paragraph.\n Your report should end with the word \"TERMINATE\" to signal the end of the conversation.", + "model_client_stream": false, + "reflect_on_tool_use": false, + "tool_call_summary_format": "{result}" + } + } + ], + "model_client": { + "provider": "autogen_ext.models.openai.OpenAIChatCompletionClient", + "component_type": "model", + "version": 1, + "component_version": 1, + "description": "Chat completion client for OpenAI hosted models.", + "label": "OpenAIChatCompletionClient", + "config": { + "temperature": 0.7, + "model": "gpt-4o" + } + }, + "termination_condition": { + "provider": "autogen_agentchat.base.OrTerminationCondition", + "component_type": "termination", + "version": 1, + "component_version": 1, + "label": "OrTerminationCondition", + "config": { + "conditions": [ + { + "provider": "autogen_agentchat.conditions.TextMentionTermination", + "component_type": "termination", + "version": 1, + "component_version": 1, + "description": "Terminate the conversation if a specific text is mentioned.", + "label": "TextMentionTermination", + "config": { + "text": "TERMINATE" + } + }, + { + "provider": "autogen_agentchat.conditions.MaxMessageTermination", + "component_type": "termination", + "version": 1, + "component_version": 1, + "description": "Terminate the conversation after a maximum number of messages have been exchanged.", + "label": "MaxMessageTermination", + "config": { + "max_messages": 30 + } + } + ] + } + }, + "selector_prompt": "You are coordinating a research team by selecting the team member to speak/act next. The following team member roles are available:\n {roles}.\n The research_assistant performs searches and analyzes information.\n The verifier evaluates progress and ensures completeness.\n The summary_agent provides a detailed markdown summary of the research as a report to the user.\n\n Given the current context, select the most appropriate next speaker.\n The research_assistant should search and analyze.\n The verifier should evaluate progress and guide the research (select this role is there is a need to verify/evaluate progress). You should ONLY select the summary_agent role if the research is complete and it is time to generate a report.\n\n Base your selection on:\n 1. Current stage of research\n 2. Last speaker's findings or suggestions\n 3. Need for verification vs need for new information\n \n Read the following conversation. Then select the next role from {participants} to play. Only return the role.\n\n {history}\n\n Read the above conversation. Then select the next role from {participants} to play. ONLY RETURN THE ROLE.", + "allow_repeated_speaker": true + } } ], "components": { @@ -283,7 +527,7 @@ "description": "Create custom tools by wrapping standard Python functions.", "label": "FunctionTool", "config": { - "source_code": "def calculator(a: float, b: float, operator: str) -> str:\n try:\n if operator == \"+\":\n return str(a + b)\n elif operator == \"-\":\n return str(a - b)\n elif operator == \"*\":\n return str(a * b)\n elif operator == \"/\":\n if b == 0:\n return \"Error: Division by zero\"\n return str(a / b)\n else:\n return \"Error: Invalid operator. 
Please use +, -, *, or /\"\n except Exception as e:\n return f\"Error: {str(e)}\"\n", + "source_code": "def calculator(a: float, b: float, operator: str) -> str:\n try:\n if operator == \"+\":\n return str(a + b)\n elif operator == \"-\":\n return str(a - b)\n elif operator == \"*\":\n return str(a * b)\n elif operator == \"/\":\n if b == 0:\n return \"Error: Division by zero\"\n return str(a / b)\n else:\n return \"Error: Invalid operator. Please use +, -, *, or /\"\n except Exception as e:\n return f\"Error: {str(e)}\"\n", "name": "calculator", "description": "A simple calculator that performs basic arithmetic operations", "global_imports": [], @@ -409,6 +653,12 @@ "label": "Mistral-7B vllm", "config": { "model": "TheBloke/Mistral-7B-Instruct-v0.2-GGUF", + "model_info": { + "vision": false, + "function_calling": true, + "json_output": false, + "family": "unknown" + }, "base_url": "http://localhost:1234/v1" } } @@ -419,10 +669,10 @@ "component_type": "tool", "version": 1, "component_version": 1, - "description": "Create custom tools by wrapping standard Python functions.", - "label": "FunctionTool", + "description": "A tool that performs basic arithmetic operations (addition, subtraction, multiplication, division).", + "label": "Calculator Tool", "config": { - "source_code": "def calculator(a: float, b: float, operator: str) -> str:\n try:\n if operator == \"+\":\n return str(a + b)\n elif operator == \"-\":\n return str(a - b)\n elif operator == \"*\":\n return str(a * b)\n elif operator == \"/\":\n if b == 0:\n return \"Error: Division by zero\"\n return str(a / b)\n else:\n return \"Error: Invalid operator. Please use +, -, *, or /\"\n except Exception as e:\n return f\"Error: {str(e)}\"\n", + "source_code": "def calculator(a: float, b: float, operator: str) -> str:\n try:\n if operator == \"+\":\n return str(a + b)\n elif operator == \"-\":\n return str(a - b)\n elif operator == \"*\":\n return str(a * b)\n elif operator == \"/\":\n if b == 0:\n return \"Error: Division by zero\"\n return str(a / b)\n else:\n return \"Error: Invalid operator. Please use +, -, *, or /\"\n except Exception as e:\n return f\"Error: {str(e)}\"\n", "name": "calculator", "description": "A simple calculator that performs basic arithmetic operations", "global_imports": [], @@ -437,10 +687,13 @@ "description": "A tool that generates images based on a text description using OpenAI's DALL-E model. 
Note: Requires OpenAI API key to function.", "label": "Image Generation Tool", "config": { - "source_code": "async def generate_image(\n query: str,\n output_dir: Optional[Path] = None,\n image_size: Literal[\"1024x1024\", \"512x512\", \"256x256\"] = \"1024x1024\"\n) -> List[str]:\n \"\"\"\n Generate images using OpenAI's DALL-E model based on a text description.\n\n Args:\n query: Natural language description of the desired image\n output_dir: Directory to save generated images (default: current directory)\n image_size: Size of generated image (1024x1024, 512x512, or 256x256)\n\n Returns:\n List[str]: Paths to the generated image files\n \"\"\"\n # Initialize the OpenAI client\n client = OpenAI()\n\n # Generate images using DALL-E 3\n response = client.images.generate(\n model=\"dall-e-3\",\n prompt=query,\n n=1,\n response_format=\"b64_json\",\n size=image_size\n )\n\n saved_files = []\n\n # Process the response\n if response.data:\n for image_data in response.data:\n # Generate a unique filename\n file_name = f\"{uuid.uuid4()}.png\"\n\n # Use output_dir if provided, otherwise use current directory\n file_path = Path(output_dir) / file_name if output_dir else Path(file_name)\n\n base64_str = image_data.b64_json \n img = Image.open(io.BytesIO(base64.decodebytes(bytes(base64_str, \"utf-8\")))) \n\n # Save the image to a file \n img.save(file_path) \n\n saved_files.append(str(file_path))\n\n return saved_files\n", + "source_code": "async def generate_image(\n query: str, output_dir: Optional[Path] = None, image_size: Literal[\"1024x1024\", \"512x512\", \"256x256\"] = \"1024x1024\"\n) -> List[str]:\n \"\"\"\n Generate images using OpenAI's DALL-E model based on a text description.\n\n Args:\n query: Natural language description of the desired image\n output_dir: Directory to save generated images (default: current directory)\n image_size: Size of generated image (1024x1024, 512x512, or 256x256)\n\n Returns:\n List[str]: Paths to the generated image files\n \"\"\"\n # Initialize the OpenAI client\n client = OpenAI()\n\n # Generate images using DALL-E 3\n response = client.images.generate(model=\"dall-e-3\", prompt=query, n=1, response_format=\"b64_json\", size=image_size)\n\n saved_files = []\n\n # Process the response\n if response.data:\n for image_data in response.data:\n # Generate a unique filename\n file_name = f\"{uuid.uuid4()}.png\"\n\n # Use output_dir if provided, otherwise use current directory\n file_path = Path(output_dir) / file_name if output_dir else Path(file_name)\n\n base64_str = image_data.b64_json\n img = Image.open(io.BytesIO(base64.decodebytes(bytes(base64_str, \"utf-8\"))))\n\n # Save the image to a file\n img.save(file_path)\n\n saved_files.append(str(file_path))\n\n return saved_files\n", "name": "generate_image", "description": "Generate images using DALL-E based on text descriptions.", "global_imports": [ + "io", + "uuid", + "base64", { "module": "typing", "imports": ["List", "Optional", "Literal"] @@ -449,10 +702,6 @@ "module": "pathlib", "imports": ["Path"] }, - { - "module": "uuid", - "imports": ["uuid4"] - }, { "module": "openai", "imports": ["OpenAI"] @@ -460,14 +709,6 @@ { "module": "PIL", "imports": ["Image"] - }, - { - "module": "io", - "imports": ["BytesIO"] - }, - { - "module": "base64", - "imports": ["decodebytes"] } ], "has_cancellation_support": false @@ -481,10 +722,13 @@ "description": "A tool that generates a PDF file from a list of images.Requires the PyFPDF and pillow library to function.", "label": "PDF Generation Tool", "config": { - 
"source_code": "async def generate_pdf(\n sections: List[Dict[str, Optional[str]]], \n output_file: str = \"report.pdf\",\n report_title: str = \"PDF Report\"\n) -> str:\n \"\"\"\n Generate a PDF report with formatted sections including text and images.\n\n Args:\n sections: List of dictionaries containing section details with keys:\n - title: Section title\n - level: Heading level (title, h1, h2)\n - content: Section text content\n - image: Optional image URL or file path\n output_file: Name of output PDF file\n report_title: Title shown at top of report\n\n Returns:\n str: Path to the generated PDF file\n \"\"\"\n def normalize_text(text: str) -> str:\n \"\"\"Normalize Unicode text to ASCII.\"\"\"\n return unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('ascii')\n\n def get_image(image_url_or_path):\n \"\"\"Fetch image from URL or local path.\"\"\"\n if image_url_or_path.startswith((\"http://\", \"https://\")):\n response = requests.get(image_url_or_path)\n if response.status_code == 200:\n return BytesIO(response.content)\n elif Path(image_url_or_path).is_file():\n return open(image_url_or_path, 'rb')\n return None\n\n def add_rounded_corners(img, radius=6):\n \"\"\"Add rounded corners to an image.\"\"\"\n mask = Image.new('L', img.size, 0)\n draw = ImageDraw.Draw(mask)\n draw.rounded_rectangle([(0, 0), img.size], radius, fill=255)\n img = ImageOps.fit(img, mask.size, centering=(0.5, 0.5))\n img.putalpha(mask)\n return img\n\n class PDF(FPDF):\n \"\"\"Custom PDF class with header and content formatting.\"\"\"\n def header(self):\n self.set_font(\"Arial\", \"B\", 12)\n normalized_title = normalize_text(report_title)\n self.cell(0, 10, normalized_title, 0, 1, \"C\")\n\n def chapter_title(self, txt): \n self.set_font(\"Arial\", \"B\", 12)\n normalized_txt = normalize_text(txt)\n self.cell(0, 10, normalized_txt, 0, 1, \"L\")\n self.ln(2)\n\n def chapter_body(self, body):\n self.set_font(\"Arial\", \"\", 12)\n normalized_body = normalize_text(body)\n self.multi_cell(0, 10, normalized_body)\n self.ln()\n\n def add_image(self, img_data):\n img = Image.open(img_data)\n img = add_rounded_corners(img)\n img_path = Path(f\"temp_{uuid.uuid4().hex}.png\")\n img.save(img_path, format=\"PNG\")\n self.image(str(img_path), x=None, y=None, w=190 if img.width > 190 else img.width)\n self.ln(10)\n img_path.unlink()\n\n # Initialize PDF\n pdf = PDF()\n pdf.add_page()\n font_size = {\"title\": 16, \"h1\": 14, \"h2\": 12, \"body\": 12}\n\n # Add sections\n for section in sections:\n title = section.get(\"title\", \"\")\n level = section.get(\"level\", \"h1\")\n content = section.get(\"content\", \"\")\n image = section.get(\"image\")\n\n pdf.set_font(\"Arial\", \"B\" if level in font_size else \"\", font_size.get(level, font_size[\"body\"]))\n pdf.chapter_title(title)\n\n if content:\n pdf.chapter_body(content)\n\n if image:\n img_data = get_image(image)\n if img_data:\n pdf.add_image(img_data)\n if isinstance(img_data, BytesIO):\n img_data.close()\n\n pdf.output(output_file)\n return output_file\n", + "source_code": "async def generate_pdf(\n sections: List[Dict[str, Optional[str]]], output_file: str = \"report.pdf\", report_title: str = \"PDF Report\"\n) -> str:\n \"\"\"\n Generate a PDF report with formatted sections including text and images.\n\n Args:\n sections: List of dictionaries containing section details with keys:\n - title: Section title\n - level: Heading level (title, h1, h2)\n - content: Section text content\n - image: Optional image URL or file path\n output_file: Name of 
output PDF file\n report_title: Title shown at top of report\n\n Returns:\n str: Path to the generated PDF file\n \"\"\"\n\n def normalize_text(text: str) -> str:\n \"\"\"Normalize Unicode text to ASCII.\"\"\"\n return unicodedata.normalize(\"NFKD\", text).encode(\"ascii\", \"ignore\").decode(\"ascii\")\n\n def get_image(image_url_or_path):\n \"\"\"Fetch image from URL or local path.\"\"\"\n if image_url_or_path.startswith((\"http://\", \"https://\")):\n response = requests.get(image_url_or_path)\n if response.status_code == 200:\n return BytesIO(response.content)\n elif Path(image_url_or_path).is_file():\n return open(image_url_or_path, \"rb\")\n return None\n\n def add_rounded_corners(img, radius=6):\n \"\"\"Add rounded corners to an image.\"\"\"\n mask = Image.new(\"L\", img.size, 0)\n draw = ImageDraw.Draw(mask)\n draw.rounded_rectangle([(0, 0), img.size], radius, fill=255)\n img = ImageOps.fit(img, mask.size, centering=(0.5, 0.5))\n img.putalpha(mask)\n return img\n\n class PDF(FPDF):\n \"\"\"Custom PDF class with header and content formatting.\"\"\"\n\n def header(self):\n self.set_font(\"Arial\", \"B\", 12)\n normalized_title = normalize_text(report_title)\n self.cell(0, 10, normalized_title, 0, 1, \"C\")\n\n def chapter_title(self, txt):\n self.set_font(\"Arial\", \"B\", 12)\n normalized_txt = normalize_text(txt)\n self.cell(0, 10, normalized_txt, 0, 1, \"L\")\n self.ln(2)\n\n def chapter_body(self, body):\n self.set_font(\"Arial\", \"\", 12)\n normalized_body = normalize_text(body)\n self.multi_cell(0, 10, normalized_body)\n self.ln()\n\n def add_image(self, img_data):\n img = Image.open(img_data)\n img = add_rounded_corners(img)\n img_path = Path(f\"temp_{uuid.uuid4().hex}.png\")\n img.save(img_path, format=\"PNG\")\n self.image(str(img_path), x=None, y=None, w=190 if img.width > 190 else img.width)\n self.ln(10)\n img_path.unlink()\n\n # Initialize PDF\n pdf = PDF()\n pdf.add_page()\n font_size = {\"title\": 16, \"h1\": 14, \"h2\": 12, \"body\": 12}\n\n # Add sections\n for section in sections:\n title = section.get(\"title\", \"\")\n level = section.get(\"level\", \"h1\")\n content = section.get(\"content\", \"\")\n image = section.get(\"image\")\n\n pdf.set_font(\"Arial\", \"B\" if level in font_size else \"\", font_size.get(level, font_size[\"body\"]))\n pdf.chapter_title(title)\n\n if content:\n pdf.chapter_body(content)\n\n if image:\n img_data = get_image(image)\n if img_data:\n pdf.add_image(img_data)\n if isinstance(img_data, BytesIO):\n img_data.close()\n\n pdf.output(output_file)\n return output_file\n", "name": "generate_pdf", "description": "Generate PDF reports with formatted sections containing text and images", "global_imports": [ + "uuid", + "requests", + "unicodedata", { "module": "typing", "imports": ["List", "Dict", "Optional"] @@ -493,14 +737,6 @@ "module": "pathlib", "imports": ["Path"] }, - { - "module": "uuid", - "imports": ["uuid4"] - }, - { - "module": "requests", - "imports": ["get"] - }, { "module": "fpdf", "imports": ["FPDF"] @@ -512,10 +748,6 @@ { "module": "io", "imports": ["BytesIO"] - }, - { - "module": "unicodedata", - "imports": ["normalize"] } ], "has_cancellation_support": false @@ -526,21 +758,20 @@ "component_type": "tool", "version": 1, "component_version": 1, - "description": "A tool that generates a webpage from a list of images. Requires beautifulsoup4 html2text library to function.", - "label": "Webpage Generation Tool", + "description": "A tool that fetches the content of a webpage and converts it to markdown. 
Requires the requests and beautifulsoup4 library to function.", + "label": "Fetch Webpage Tool", "config": { - "source_code": "async def fetch_webpage(\n url: str,\n include_images: bool = True,\n max_length: Optional[int] = None,\n headers: Optional[Dict[str, str]] = None\n) -> str:\n \"\"\"\n Fetch a webpage and convert it to markdown format.\n\n Args:\n url: The URL of the webpage to fetch\n include_images: Whether to include image references in the markdown\n max_length: Maximum length of the output markdown (if None, no limit)\n headers: Optional HTTP headers for the request\n\n Returns:\n str: Markdown version of the webpage content\n\n Raises:\n ValueError: If the URL is invalid or the page can't be fetched\n \"\"\"\n # Use default headers if none provided\n if headers is None:\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'\n }\n\n try:\n # Fetch the webpage\n response = requests.get(url, headers=headers, timeout=10)\n response.raise_for_status()\n\n # Parse HTML\n soup = BeautifulSoup(response.text, 'html.parser')\n\n # Remove script and style elements\n for script in soup([\"script\", \"style\"]):\n script.decompose()\n\n # Convert relative URLs to absolute\n for tag in soup.find_all(['a', 'img']):\n if tag.get('href'):\n tag['href'] = urljoin(url, tag['href'])\n if tag.get('src'):\n tag['src'] = urljoin(url, tag['src'])\n\n # Configure HTML to Markdown converter\n h2t = html2text.HTML2Text()\n h2t.body_width = 0 # No line wrapping\n h2t.ignore_images = not include_images\n h2t.ignore_emphasis = False\n h2t.ignore_links = False\n h2t.ignore_tables = False\n\n # Convert to markdown\n markdown = h2t.handle(str(soup))\n\n # Trim if max_length is specified\n if max_length and len(markdown) > max_length:\n markdown = markdown[:max_length] + \"\\n...(truncated)\"\n\n return markdown.strip()\n\n except requests.RequestException as e:\n raise ValueError(f\"Failed to fetch webpage: {str(e)}\")\n except Exception as e:\n raise ValueError(f\"Error processing webpage: {str(e)}\")\n", + "source_code": "async def fetch_webpage(\n url: str, include_images: bool = True, max_length: Optional[int] = None, headers: Optional[Dict[str, str]] = None\n) -> str:\n \"\"\"Fetch a webpage and convert it to markdown format.\n\n Args:\n url: The URL of the webpage to fetch\n include_images: Whether to include image references in the markdown\n max_length: Maximum length of the output markdown (if None, no limit)\n headers: Optional HTTP headers for the request\n\n Returns:\n str: Markdown version of the webpage content\n\n Raises:\n ValueError: If the URL is invalid or the page can't be fetched\n \"\"\"\n # Use default headers if none provided\n if headers is None:\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36\"}\n\n try:\n # Fetch the webpage\n async with httpx.AsyncClient() as client:\n response = await client.get(url, headers=headers, timeout=10)\n response.raise_for_status()\n\n # Parse HTML\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n # Remove script and style elements\n for script in soup([\"script\", \"style\"]):\n script.decompose()\n\n # Convert relative URLs to absolute\n for tag in soup.find_all([\"a\", \"img\"]):\n if tag.get(\"href\"):\n tag[\"href\"] = urljoin(url, tag[\"href\"])\n if tag.get(\"src\"):\n tag[\"src\"] = urljoin(url, tag[\"src\"])\n\n # Configure HTML to Markdown converter\n h2t = html2text.HTML2Text()\n h2t.body_width = 0 # No line wrapping\n h2t.ignore_images = not 
include_images\n h2t.ignore_emphasis = False\n h2t.ignore_links = False\n h2t.ignore_tables = False\n\n # Convert to markdown\n markdown = h2t.handle(str(soup))\n\n # Trim if max_length is specified\n if max_length and len(markdown) > max_length:\n markdown = markdown[:max_length] + \"\\n...(truncated)\"\n\n return markdown.strip()\n\n except httpx.RequestError as e:\n raise ValueError(f\"Failed to fetch webpage: {str(e)}\") from e\n except Exception as e:\n raise ValueError(f\"Error processing webpage: {str(e)}\") from e\n", "name": "fetch_webpage", "description": "Fetch a webpage and convert it to markdown format, with options for including images and limiting length", "global_imports": [ + "os", + "html2text", { "module": "typing", "imports": ["Optional", "Dict"] }, - { - "module": "requests", - "imports": ["get", "RequestException"] - }, + "httpx", { "module": "bs4", "imports": ["BeautifulSoup"] @@ -565,25 +796,25 @@ "description": "A tool that performs Bing searches using the Bing Web Search API. Requires the requests library, BING_SEARCH_KEY env variable to function.", "label": "Bing Search Tool", "config": { - "source_code": "async def bing_search(\n query: str,\n num_results: int = 5,\n include_snippets: bool = True,\n language: str = \"en\",\n country: Optional[str] = None,\n safe_search: str = \"moderate\",\n response_filter: str = \"webpages\"\n) -> List[Dict[str, str]]:\n \"\"\"\n Perform a Bing search using the Bing Web Search API. \n\n Args:\n query: Search query string\n num_results: Number of results to return (max 50)\n include_snippets: Include result snippets in output\n language: Language code for search results (e.g., 'en', 'es', 'fr')\n country: Optional market code for search results (e.g., 'us', 'uk')\n safe_search: SafeSearch setting ('off', 'moderate', or 'strict')\n response_filter: Type of results ('webpages', 'news', 'images', or 'videos')\n\n Returns:\n List[Dict[str, str]]: List of search results\n\n Raises:\n ValueError: If API credentials are invalid or request fails\n \"\"\"\n # Get and validate API key\n api_key = os.getenv('BING_SEARCH_KEY', '').strip()\n\n if not api_key:\n raise ValueError(\n \"BING_SEARCH_KEY environment variable is not set. \"\n \"Please obtain an API key from Azure Portal.\"\n )\n\n # Validate safe_search parameter\n valid_safe_search = ['off', 'moderate', 'strict']\n if safe_search.lower() not in valid_safe_search:\n raise ValueError(\n f\"Invalid safe_search value. Must be one of: {', '.join(valid_safe_search)}\"\n )\n\n # Validate response_filter parameter\n valid_filters = ['webpages', 'news', 'images', 'videos']\n if response_filter.lower() not in valid_filters:\n raise ValueError(\n f\"Invalid response_filter value. Must be one of: {', '.join(valid_filters)}\"\n )\n\n # Build request headers and parameters\n headers = {\n 'Ocp-Apim-Subscription-Key': api_key,\n 'Accept': 'application/json'\n }\n\n params = {\n 'q': query,\n 'count': min(max(1, num_results), 50),\n 'mkt': f'{language}-{country.upper()}' if country else language,\n 'safeSearch': safe_search.capitalize(),\n 'responseFilter': response_filter,\n 'textFormat': 'raw'\n }\n\n # Make the request\n try:\n response = requests.get(\n 'https://api.bing.microsoft.com/v7.0/search',\n headers=headers,\n params=params,\n timeout=10\n )\n\n # Handle common error cases\n if response.status_code == 401:\n raise ValueError(\n \"Authentication failed. Please verify your Bing Search API key.\"\n )\n elif response.status_code == 403:\n raise ValueError(\n \"Access forbidden. 
This could mean:\\n\"\n \"1. The API key is invalid\\n\"\n \"2. The API key has expired\\n\"\n \"3. You've exceeded your API quota\"\n )\n elif response.status_code == 429:\n raise ValueError(\"API quota exceeded. Please try again later.\")\n\n response.raise_for_status()\n data = response.json()\n\n # Process results based on response_filter\n results = []\n if response_filter == 'webpages' and 'webPages' in data:\n items = data['webPages']['value']\n elif response_filter == 'news' and 'news' in data:\n items = data['news']['value']\n elif response_filter == 'images' and 'images' in data:\n items = data['images']['value']\n elif response_filter == 'videos' and 'videos' in data:\n items = data['videos']['value']\n else:\n if not any(key in data for key in ['webPages', 'news', 'images', 'videos']):\n return [] # No results found\n raise ValueError(f\"No {response_filter} results found in API response\")\n\n # Extract relevant information based on result type\n for item in items:\n result = {'title': item.get('name', '')}\n\n if response_filter == 'webpages':\n result['link'] = item.get('url', '')\n if include_snippets:\n result['snippet'] = item.get('snippet', '')\n\n elif response_filter == 'news':\n result['link'] = item.get('url', '')\n if include_snippets:\n result['snippet'] = item.get('description', '')\n result['date'] = item.get('datePublished', '')\n\n elif response_filter == 'images':\n result['link'] = item.get('contentUrl', '')\n result['thumbnail'] = item.get('thumbnailUrl', '')\n if include_snippets:\n result['snippet'] = item.get('description', '')\n\n elif response_filter == 'videos':\n result['link'] = item.get('contentUrl', '')\n result['thumbnail'] = item.get('thumbnailUrl', '')\n if include_snippets:\n result['snippet'] = item.get('description', '')\n result['duration'] = item.get('duration', '')\n\n results.append(result)\n\n return results[:num_results]\n\n except requests.RequestException as e:\n error_msg = str(e)\n if \"InvalidApiKey\" in error_msg:\n raise ValueError(\n \"Invalid API key. Please check your BING_SEARCH_KEY environment variable.\"\n )\n elif \"KeyExpired\" in error_msg:\n raise ValueError(\"API key has expired. Please generate a new key.\")\n else:\n raise ValueError(f\"Search request failed: {error_msg}\")\n except json.JSONDecodeError:\n raise ValueError(\n \"Failed to parse API response. 
\"\n \"Please verify your API credentials and try again.\"\n )\n except Exception as e:\n raise ValueError(f\"Unexpected error during search: {str(e)}\")\n", + "source_code": "async def bing_search(\n query: str,\n num_results: int = 5,\n include_snippets: bool = True,\n include_content: bool = True,\n content_max_length: Optional[int] = 15000,\n language: str = \"en\",\n country: Optional[str] = None,\n safe_search: str = \"moderate\",\n response_filter: str = \"webpages\",\n) -> List[Dict[str, str]]:\n \"\"\"\n Perform a Bing search using the Bing Web Search API.\n\n Args:\n query: Search query string\n num_results: Number of results to return (max 50)\n include_snippets: Include result snippets in output\n include_content: Include full webpage content in markdown format\n content_max_length: Maximum length of webpage content (if included)\n language: Language code for search results (e.g., 'en', 'es', 'fr')\n country: Optional market code for search results (e.g., 'us', 'uk')\n safe_search: SafeSearch setting ('off', 'moderate', or 'strict')\n response_filter: Type of results ('webpages', 'news', 'images', or 'videos')\n\n Returns:\n List[Dict[str, str]]: List of search results\n\n Raises:\n ValueError: If API credentials are invalid or request fails\n \"\"\"\n # Get and validate API key\n api_key = os.getenv(\"BING_SEARCH_KEY\", \"\").strip()\n\n if not api_key:\n raise ValueError(\n \"BING_SEARCH_KEY environment variable is not set. \" \"Please obtain an API key from Azure Portal.\"\n )\n\n # Validate safe_search parameter\n valid_safe_search = [\"off\", \"moderate\", \"strict\"]\n if safe_search.lower() not in valid_safe_search:\n raise ValueError(f\"Invalid safe_search value. Must be one of: {', '.join(valid_safe_search)}\")\n\n # Validate response_filter parameter\n valid_filters = [\"webpages\", \"news\", \"images\", \"videos\"]\n if response_filter.lower() not in valid_filters:\n raise ValueError(f\"Invalid response_filter value. 
Must be one of: {', '.join(valid_filters)}\")\n\n async def fetch_page_content(url: str, max_length: Optional[int] = 50000) -> str:\n \"\"\"Helper function to fetch and convert webpage content to markdown\"\"\"\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36\"}\n\n try:\n async with httpx.AsyncClient() as client:\n response = await client.get(url, headers=headers, timeout=10)\n response.raise_for_status()\n\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n # Remove script and style elements\n for script in soup([\"script\", \"style\"]):\n script.decompose()\n\n # Convert relative URLs to absolute\n for tag in soup.find_all([\"a\", \"img\"]):\n if tag.get(\"href\"):\n tag[\"href\"] = urljoin(url, tag[\"href\"])\n if tag.get(\"src\"):\n tag[\"src\"] = urljoin(url, tag[\"src\"])\n\n h2t = html2text.HTML2Text()\n h2t.body_width = 0\n h2t.ignore_images = False\n h2t.ignore_emphasis = False\n h2t.ignore_links = False\n h2t.ignore_tables = False\n\n markdown = h2t.handle(str(soup))\n\n if max_length and len(markdown) > max_length:\n markdown = markdown[:max_length] + \"\\n...(truncated)\"\n\n return markdown.strip()\n\n except Exception as e:\n return f\"Error fetching content: {str(e)}\"\n\n # Build request headers and parameters\n headers = {\"Ocp-Apim-Subscription-Key\": api_key, \"Accept\": \"application/json\"}\n\n params = {\n \"q\": query,\n \"count\": min(max(1, num_results), 50),\n \"mkt\": f\"{language}-{country.upper()}\" if country else language,\n \"safeSearch\": safe_search.capitalize(),\n \"responseFilter\": response_filter,\n \"textFormat\": \"raw\",\n }\n\n # Make the request\n try:\n async with httpx.AsyncClient() as client:\n response = await client.get(\n \"https://api.bing.microsoft.com/v7.0/search\", headers=headers, params=params, timeout=10\n )\n\n # Handle common error cases\n if response.status_code == 401:\n raise ValueError(\"Authentication failed. Please verify your Bing Search API key.\")\n elif response.status_code == 403:\n raise ValueError(\n \"Access forbidden. This could mean:\\n\"\n \"1. The API key is invalid\\n\"\n \"2. The API key has expired\\n\"\n \"3. You've exceeded your API quota\"\n )\n elif response.status_code == 429:\n raise ValueError(\"API quota exceeded. 
Please try again later.\")\n\n response.raise_for_status()\n data = response.json()\n\n # Process results based on response_filter\n results = []\n if response_filter == \"webpages\" and \"webPages\" in data:\n items = data[\"webPages\"][\"value\"]\n elif response_filter == \"news\" and \"news\" in data:\n items = data[\"news\"][\"value\"]\n elif response_filter == \"images\" and \"images\" in data:\n items = data[\"images\"][\"value\"]\n elif response_filter == \"videos\" and \"videos\" in data:\n items = data[\"videos\"][\"value\"]\n else:\n if not any(key in data for key in [\"webPages\", \"news\", \"images\", \"videos\"]):\n return [] # No results found\n raise ValueError(f\"No {response_filter} results found in API response\")\n\n # Extract relevant information based on result type\n for item in items:\n result = {\"title\": item.get(\"name\", \"\")}\n\n if response_filter == \"webpages\":\n result[\"link\"] = item.get(\"url\", \"\")\n if include_snippets:\n result[\"snippet\"] = item.get(\"snippet\", \"\")\n if include_content:\n result[\"content\"] = await fetch_page_content(result[\"link\"], max_length=content_max_length)\n\n elif response_filter == \"news\":\n result[\"link\"] = item.get(\"url\", \"\")\n if include_snippets:\n result[\"snippet\"] = item.get(\"description\", \"\")\n result[\"date\"] = item.get(\"datePublished\", \"\")\n if include_content:\n result[\"content\"] = await fetch_page_content(result[\"link\"], max_length=content_max_length)\n\n elif response_filter == \"images\":\n result[\"link\"] = item.get(\"contentUrl\", \"\")\n result[\"thumbnail\"] = item.get(\"thumbnailUrl\", \"\")\n if include_snippets:\n result[\"snippet\"] = item.get(\"description\", \"\")\n\n elif response_filter == \"videos\":\n result[\"link\"] = item.get(\"contentUrl\", \"\")\n result[\"thumbnail\"] = item.get(\"thumbnailUrl\", \"\")\n if include_snippets:\n result[\"snippet\"] = item.get(\"description\", \"\")\n result[\"duration\"] = item.get(\"duration\", \"\")\n\n results.append(result)\n\n return results[:num_results]\n\n except httpx.RequestError as e:\n error_msg = str(e)\n if \"InvalidApiKey\" in error_msg:\n raise ValueError(\"Invalid API key. Please check your BING_SEARCH_KEY environment variable.\") from e\n elif \"KeyExpired\" in error_msg:\n raise ValueError(\"API key has expired. Please generate a new key.\") from e\n else:\n raise ValueError(f\"Search request failed: {error_msg}\") from e\n except json.JSONDecodeError:\n raise ValueError(\"Failed to parse API response. \" \"Please verify your API credentials and try again.\") from None\n except Exception as e:\n raise ValueError(f\"Unexpected error during search: {str(e)}\") from e\n", "name": "bing_search", - "description": "\n Perform Bing searches using the Bing Web Search API. \n Requires BING_SEARCH_KEY environment variable.\n Supports web, news, image, and video searches.\n See function documentation for detailed setup instructions.\n ", + "description": "\n Perform Bing searches using the Bing Web Search API. 
Requires BING_SEARCH_KEY environment variable.\n Supports web, news, image, and video searches.\n See function documentation for detailed setup instructions.\n ", "global_imports": [ { "module": "typing", "imports": ["List", "Dict", "Optional"] }, + "os", + "httpx", + "json", + "html2text", { - "module": "os", - "imports": ["getenv"] - }, - { - "module": "requests", - "imports": ["get", "RequestException"] + "module": "bs4", + "imports": ["BeautifulSoup"] }, { - "module": "json", - "imports": ["JSONDecodeError"] + "module": "urllib.parse", + "imports": ["urljoin"] } ], "has_cancellation_support": false @@ -597,21 +828,24 @@ "description": "A tool that performs Google searches using the Google Custom Search API. Requires the requests library, [GOOGLE_API_KEY, GOOGLE_CSE_ID] to be set, env variable to function.", "label": "Google Search Tool", "config": { - "source_code": "async def google_search(\n query: str,\n num_results: int = 5,\n include_snippets: bool = True,\n language: str = \"en\",\n country: Optional[str] = None,\n safe_search: bool = True\n) -> List[Dict[str, str]]:\n \"\"\"\n Perform a Google search using the Custom Search API. \n Args:\n query: Search query string\n num_results: Number of results to return (max 10)\n include_snippets: Include result snippets in output\n language: Language code for search results (e.g., 'en', 'es', 'fr')\n country: Optional country code for search results (e.g., 'us', 'uk')\n safe_search: Enable safe search filtering\n\n Returns:\n List[Dict[str, str]]: List of search results, each containing:\n - title: Result title\n - link: Result URL\n - snippet: Result description (if include_snippets=True)\n\n Raises:\n ValueError: If API keys are missing or invalid, or if the request fails\n \"\"\"\n # Get API credentials\n api_key = os.getenv('GOOGLE_API_KEY')\n cse_id = os.getenv('GOOGLE_CSE_ID')\n\n if not api_key or not cse_id:\n raise ValueError(\n \"Missing required environment variables. Please set GOOGLE_API_KEY and GOOGLE_CSE_ID. 
\"\n \"See function documentation for setup instructions.\"\n )\n\n # Ensure num_results is within allowed range\n num_results = min(max(1, num_results), 10)\n\n # Build request parameters\n params = {\n 'key': api_key,\n 'cx': cse_id,\n 'q': query,\n 'num': num_results,\n 'hl': language,\n 'safe': 'active' if safe_search else 'off',\n }\n\n if country:\n params['gl'] = country\n\n # Make the request\n try:\n response = requests.get(\n 'https://www.googleapis.com/customsearch/v1',\n params=params,\n timeout=10\n )\n response.raise_for_status()\n data = response.json()\n\n # Process results\n results = []\n if 'items' in data:\n for item in data['items']:\n result = {\n 'title': item.get('title', ''),\n 'link': item.get('link', '')\n }\n if include_snippets:\n result['snippet'] = item.get('snippet', '')\n results.append(result)\n\n return results\n\n except requests.RequestException as e:\n raise ValueError(f\"Failed to perform search: {str(e)}\")\n except KeyError as e:\n raise ValueError(f\"Invalid API response format: {str(e)}\")\n except Exception as e:\n raise ValueError(f\"Error during search: {str(e)}\")\n", + "source_code": "async def google_search(\n query: str,\n num_results: int = 5,\n include_snippets: bool = True,\n include_content: bool = True,\n content_max_length: Optional[int] = 15000,\n language: str = \"en\",\n country: Optional[str] = None,\n safe_search: bool = True,\n) -> List[Dict[str, str]]:\n \"\"\"\n Perform a Google search using the Custom Search API and optionally fetch webpage content.\n\n Args:\n query: Search query string\n num_results: Number of results to return (max 10)\n include_snippets: Include result snippets in output\n include_content: Include full webpage content in markdown format\n content_max_length: Maximum length of webpage content (if included)\n language: Language code for search results (e.g., en, es, fr)\n country: Optional country code for search results (e.g., us, uk)\n safe_search: Enable safe search filtering\n\n Returns:\n List[Dict[str, str]]: List of search results, each containing:\n - title: Result title\n - link: Result URL\n - snippet: Result description (if include_snippets=True)\n - content: Webpage content in markdown (if include_content=True)\n \"\"\"\n api_key = os.getenv(\"GOOGLE_API_KEY\")\n cse_id = os.getenv(\"GOOGLE_CSE_ID\")\n\n if not api_key or not cse_id:\n raise ValueError(\"Missing required environment variables. 
Please set GOOGLE_API_KEY and GOOGLE_CSE_ID.\")\n\n num_results = min(max(1, num_results), 10)\n\n async def fetch_page_content(url: str, max_length: Optional[int] = 50000) -> str:\n \"\"\"Helper function to fetch and convert webpage content to markdown\"\"\"\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36\"}\n\n try:\n async with httpx.AsyncClient() as client:\n response = await client.get(url, headers=headers, timeout=10)\n response.raise_for_status()\n\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n # Remove script and style elements\n for script in soup([\"script\", \"style\"]):\n script.decompose()\n\n # Convert relative URLs to absolute\n for tag in soup.find_all([\"a\", \"img\"]):\n if tag.get(\"href\"):\n tag[\"href\"] = urljoin(url, tag[\"href\"])\n if tag.get(\"src\"):\n tag[\"src\"] = urljoin(url, tag[\"src\"])\n\n h2t = html2text.HTML2Text()\n h2t.body_width = 0\n h2t.ignore_images = False\n h2t.ignore_emphasis = False\n h2t.ignore_links = False\n h2t.ignore_tables = False\n\n markdown = h2t.handle(str(soup))\n\n if max_length and len(markdown) > max_length:\n markdown = markdown[:max_length] + \"\\n...(truncated)\"\n\n return markdown.strip()\n\n except Exception as e:\n return f\"Error fetching content: {str(e)}\"\n\n params = {\n \"key\": api_key,\n \"cx\": cse_id,\n \"q\": query,\n \"num\": num_results,\n \"hl\": language,\n \"safe\": \"active\" if safe_search else \"off\",\n }\n\n if country:\n params[\"gl\"] = country\n\n try:\n async with httpx.AsyncClient() as client:\n response = await client.get(\"https://www.googleapis.com/customsearch/v1\", params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n\n results = []\n if \"items\" in data:\n for item in data[\"items\"]:\n result = {\"title\": item.get(\"title\", \"\"), \"link\": item.get(\"link\", \"\")}\n if include_snippets:\n result[\"snippet\"] = item.get(\"snippet\", \"\")\n\n if include_content:\n result[\"content\"] = await fetch_page_content(result[\"link\"], max_length=content_max_length)\n\n results.append(result)\n\n return results\n\n except httpx.RequestError as e:\n raise ValueError(f\"Failed to perform search: {str(e)}\") from e\n except KeyError as e:\n raise ValueError(f\"Invalid API response format: {str(e)}\") from e\n except Exception as e:\n raise ValueError(f\"Error during search: {str(e)}\") from e\n", "name": "google_search", - "description": "\n Perform Google searches using the Custom Search API. 
\n Requires GOOGLE_API_KEY and GOOGLE_CSE_ID environment variables to be set.\n See function documentation for setup instructions.\n ", + "description": "\n Perform Google searches using the Custom Search API with optional webpage content fetching.\n Requires GOOGLE_API_KEY and GOOGLE_CSE_ID environment variables to be set.\n ", "global_imports": [ { "module": "typing", "imports": ["List", "Dict", "Optional"] }, + "os", + "httpx", + "html2text", { - "module": "os", - "imports": ["getenv"] + "module": "bs4", + "imports": ["BeautifulSoup"] }, { - "module": "requests", - "imports": ["get", "RequestException"] + "module": "urllib.parse", + "imports": ["urljoin"] } ], "has_cancellation_support": false diff --git a/python/packages/autogen-studio/frontend/src/components/views/gallery/store.tsx b/python/packages/autogen-studio/frontend/src/components/views/gallery/store.tsx index 7c8edc0e2947..465554b58a74 100644 --- a/python/packages/autogen-studio/frontend/src/components/views/gallery/store.tsx +++ b/python/packages/autogen-studio/frontend/src/components/views/gallery/store.tsx @@ -151,7 +151,7 @@ export const useGalleryStore = create()( }, }), { - name: "gallery-storage-v2", + name: "gallery-storage-v3", } ) ); diff --git a/python/packages/autogen-studio/frontend/src/components/views/session/chat/chat.tsx b/python/packages/autogen-studio/frontend/src/components/views/session/chat/chat.tsx index 8f5ea8bcd39a..e5b3bd11d070 100644 --- a/python/packages/autogen-studio/frontend/src/components/views/session/chat/chat.tsx +++ b/python/packages/autogen-studio/frontend/src/components/views/session/chat/chat.tsx @@ -148,8 +148,6 @@ export default function ChatView({ session }: ChatViewProps) { setCurrentRun((current) => { if (!current || !session?.id) return null; - console.log("WebSocket message:", message); - switch (message.type) { case "error": if (inputTimeoutRef.current) { @@ -305,7 +303,6 @@ export default function ChatView({ session }: ChatViewProps) { } try { - console.log("Sending input response:", response); activeSocketRef.current.send( JSON.stringify({ type: "input_response", diff --git a/python/packages/autogen-studio/frontend/src/components/views/session/chat/runview.tsx b/python/packages/autogen-studio/frontend/src/components/views/session/chat/runview.tsx index 005febf5042c..ddf662e7629e 100644 --- a/python/packages/autogen-studio/frontend/src/components/views/session/chat/runview.tsx +++ b/python/packages/autogen-studio/frontend/src/components/views/session/chat/runview.tsx @@ -125,12 +125,6 @@ const RunView: React.FC = ({ const lastResultMessage = run.team_result?.task_result.messages.slice(-1)[0]; const lastMessage = run.messages.slice(-1)[0]; - console.log("lastResultMessage", lastResultMessage); - console.log( - "lastMessage", - run.messages[run.messages.length - 1]?.config?.content - ); - return (
{/* Run Header */} diff --git a/python/packages/autogen-studio/frontend/src/components/views/session/manager.tsx b/python/packages/autogen-studio/frontend/src/components/views/session/manager.tsx index 49d72b20ab6a..97be1dd0d045 100644 --- a/python/packages/autogen-studio/frontend/src/components/views/session/manager.tsx +++ b/python/packages/autogen-studio/frontend/src/components/views/session/manager.tsx @@ -128,7 +128,6 @@ export const SessionManager: React.FC = () => { const handleQuickStart = async (teamId: number, teamName: string) => { if (!user?.email) return; - console.log("Quick start session", teamId, teamName); try { const defaultName = `${teamName.substring( 0, diff --git a/python/packages/autogen-studio/frontend/src/components/views/session/newsession.tsx b/python/packages/autogen-studio/frontend/src/components/views/session/newsession.tsx index f2288773d802..611b6bf57b3b 100644 --- a/python/packages/autogen-studio/frontend/src/components/views/session/newsession.tsx +++ b/python/packages/autogen-studio/frontend/src/components/views/session/newsession.tsx @@ -71,14 +71,14 @@ const NewSessionControls = ({ // Update state first setSelectedTeamId(newTeamId); - // Save to localStorage - if (typeof window !== "undefined") { - localStorage.setItem("lastUsedTeamId", e.key); - } - - // Delay the session start to allow UI to update - await new Promise((resolve) => setTimeout(resolve, 100)); - onStartSession(newTeamId, selectedTeam.component.label || ""); + // // Save to localStorage + // if (typeof window !== "undefined") { + // localStorage.setItem("lastUsedTeamId", e.key); + // } + + // // Delay the session start to allow UI to update + // await new Promise((resolve) => setTimeout(resolve, 100)); + // onStartSession(newTeamId, selectedTeam.component.label || ""); }; const hasNoTeams = !isLoading && teams.length === 0; diff --git a/python/packages/autogen-studio/frontend/src/components/views/session/sidebar.tsx b/python/packages/autogen-studio/frontend/src/components/views/session/sidebar.tsx index fdc77cc6f981..44530aba1c95 100644 --- a/python/packages/autogen-studio/frontend/src/components/views/session/sidebar.tsx +++ b/python/packages/autogen-studio/frontend/src/components/views/session/sidebar.tsx @@ -103,6 +103,10 @@ export const Sidebar: React.FC = ({
Recents{" "} + + {" "} + ({sessions.length}){" "} + {" "} {isLoading && ( )} @@ -117,7 +121,7 @@ export const Sidebar: React.FC = ({
)} -
+
{sessions.map((s) => (
= ({ // const [isDirty, setIsDirty] = useState(false); const editorRef = useRef(null); const [messageApi, contextHolder] = message.useMessage(); + const [activeDragItem, setActiveDragItem] = useState( + null + ); + + const [testDrawerVisible, setTestDrawerVisible] = useState(false); const { undo, @@ -262,11 +284,23 @@ // Pass both new node data AND target node id addNode(position, draggedItem.config, nodeId); + setActiveDragItem(null); + }; + + const handleTestDrawerClose = () => { + setTestDrawerVisible(false); + }; const onDragStart = (item: DragItem) => { // We can add any drag start logic here if needed }; + const handleDragStart = (event: DragStartEvent) => { + const { active } = event; + if (active.data.current) { + setActiveDragItem(active.data.current as DragItemData); + } + }; return (
{contextHolder} @@ -304,6 +338,18 @@ export const TeamBuilder: React.FC = ({
+ + +
); }; diff --git a/python/packages/autogen-studio/frontend/src/components/views/team/builder/library.tsx b/python/packages/autogen-studio/frontend/src/components/views/team/builder/library.tsx index 34118174e464..65b277b6bfe1 100644 --- a/python/packages/autogen-studio/frontend/src/components/views/team/builder/library.tsx +++ b/python/packages/autogen-studio/frontend/src/components/views/team/builder/library.tsx @@ -51,7 +51,7 @@ const PresetItem: React.FC = ({ const style = { transform: CSS.Transform.toString(transform), - opacity: isDragging ? 0.5 : undefined, + opacity: isDragging ? 0.8 : undefined, }; return ( @@ -60,7 +60,7 @@ const PresetItem: React.FC = ({ style={style} {...attributes} {...listeners} - className="p-2 text-primary mb-2 border border-secondary rounded cursor-move hover:bg-secondary transition-colors " + className={`p-2 text-primary mb-2 border rounded cursor-move bg-secondary transition-colors`} >
@@ -178,7 +178,7 @@ export const ComponentLibrary: React.FC = () => { return (
diff --git a/python/packages/autogen-studio/frontend/src/components/views/team/builder/testdrawer.tsx b/python/packages/autogen-studio/frontend/src/components/views/team/builder/testdrawer.tsx new file mode 100644 index 000000000000..0848abd7f5b3 --- /dev/null +++ b/python/packages/autogen-studio/frontend/src/components/views/team/builder/testdrawer.tsx @@ -0,0 +1,98 @@ +import React, { useContext, useEffect, useState } from "react"; + +import { Drawer, Button, message, Checkbox } from "antd"; +import { Team, Session } from "../../../types/datamodel"; +import ChatView from "../../session/chat/chat"; +import { appContext } from "../../../../hooks/provider"; +import { sessionAPI } from "../../session/api"; + +interface TestDrawerProps { + isVisble: boolean; + team: Team; + onClose: () => void; +} + +const TestDrawer = ({ isVisble, onClose, team }: TestDrawerProps) => { + const [session, setSession] = useState(null); + const { user } = useContext(appContext); + const [loading, setLoading] = useState(false); + const [deleteOnClose, setDeleteOnClose] = useState(true); + const [messageApi, contextHolder] = message.useMessage(); + + const createSession = async (teamId: number, teamName: string) => { + if (!user?.email) return; + try { + const defaultName = `Test Session ${teamName.substring( + 0, + 20 + )} - ${new Date().toLocaleString()} `; + const created = await sessionAPI.createSession( + { + name: defaultName, + team_id: teamId, + }, + user.email + ); + setSession(created); + } catch (error) { + messageApi.error("Error creating session"); + } + }; + + const deleteSession = async (sessionId: number) => { + if (!user?.email) return; + try { + await sessionAPI.deleteSession(sessionId, user.email); + setSession(null); // Clear session state after successful deletion + } catch (error) { + messageApi.error("Error deleting session"); + } + }; + + // Single effect to handle session creation when drawer opens + useEffect(() => { + if (isVisble && team?.id && !session) { + setLoading(true); + createSession( + team.id, + team.component.label || team.component.component_type + ).finally(() => { + setLoading(false); + }); + } + }, [isVisble, team?.id]); + + // Single cleanup handler in the Drawer's onClose + const handleClose = async () => { + if (session?.id && deleteOnClose) { + // Only delete if flag is true + await deleteSession(session.id); + } + onClose(); + }; + + return ( +
+ {contextHolder} + Test Team: {team.component.label}} + size="large" + placement="right" + onClose={handleClose} + open={isVisble} + extra={ + setDeleteOnClose(e.target.checked)} + > + Delete session on close + + } + > + {loading &&

Creating a test session...

} + {session && <ChatView session={session} />} +
+
+ ); +}; +export default TestDrawer; diff --git a/python/packages/autogen-studio/frontend/src/components/views/team/sidebar.tsx b/python/packages/autogen-studio/frontend/src/components/views/team/sidebar.tsx index b6aca411e4e1..e60c3dcff3e9 100644 --- a/python/packages/autogen-studio/frontend/src/components/views/team/sidebar.tsx +++ b/python/packages/autogen-studio/frontend/src/components/views/team/sidebar.tsx @@ -255,7 +255,9 @@ export const TeamSidebar: React.FC = ({ onClick={(e) => { e.stopPropagation(); galleryTeam.label = - galleryTeam.label + "_" + new Date().getTime(); + galleryTeam.label + + "_" + + (new Date().getTime() + "").substring(0, 5); onCreateTeam({ component: galleryTeam, }); diff --git a/python/packages/autogen-studio/pyproject.toml b/python/packages/autogen-studio/pyproject.toml index 63fc9a65794f..f4579354a604 100644 --- a/python/packages/autogen-studio/pyproject.toml +++ b/python/packages/autogen-studio/pyproject.toml @@ -32,6 +32,7 @@ dependencies = [ "alembic", "loguru", "pyyaml", + "html2text", "autogen-core>=0.4.5,<0.5", "autogen-agentchat>=0.4.5,<0.5", "autogen-ext[magentic-one, openai, azure]>=0.4.2,<0.5", diff --git a/python/uv.lock b/python/uv.lock index ceb0e23ada8b..dbe382896347 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -749,7 +749,7 @@ requires-dist = [ [[package]] name = "autogenstudio" -version = "0.4.0" +version = "0.4.1" source = { editable = "packages/autogen-studio" } dependencies = [ { name = "aiofiles" }, @@ -759,6 +759,7 @@ dependencies = [ { name = "autogen-ext", extra = ["azure", "magentic-one", "openai"] }, { name = "azure-identity" }, { name = "fastapi", extra = ["standard"] }, + { name = "html2text" }, { name = "loguru" }, { name = "numpy" }, { name = "psycopg" }, @@ -790,6 +791,7 @@ requires-dist = [ { name = "azure-identity" }, { name = "fastapi", marker = "extra == 'web'" }, { name = "fastapi", extras = ["standard"] }, + { name = "html2text" }, { name = "loguru" }, { name = "numpy", specifier = "<2.0.0" }, { name = "psycopg" },