Skip to content
16 changes: 15 additions & 1 deletion ms_agent/config/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -216,7 +216,21 @@ def traverse_config(_config: Union[DictConfig, ListConfig, Any],
if not hasattr(current, final_key) or getattr(
current, final_key) is None:
logger.info(f'Adding new config key: {key}')
setattr(current, final_key, value)
# Convert temperature to float and max_tokens to int if they're numeric strings
value_to_set = value
if final_key == 'temperature' and isinstance(
value_to_set, str):
try:
value_to_set = float(value_to_set)
except (ValueError, TypeError):
pass
elif final_key == 'max_tokens' and isinstance(
value_to_set, str):
try:
value_to_set = int(value_to_set)
except (ValueError, TypeError):
pass
Comment on lines +220 to +232
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

The logic for converting temperature and max_tokens from strings is helpful. However, this if/elif structure can become cumbersome to maintain as more type-specific conversions are needed. Consider using a dictionary to map keys to their conversion functions for a more scalable and maintainable approach.

Suggested change
value_to_set = value
if final_key == 'temperature' and isinstance(
value_to_set, str):
try:
value_to_set = float(value_to_set)
except (ValueError, TypeError):
pass
elif final_key == 'max_tokens' and isinstance(
value_to_set, str):
try:
value_to_set = int(value_to_set)
except (ValueError, TypeError):
pass
# A mapping for specific type conversions
CONVERSIONS = {
'temperature': float,
'max_tokens': int,
}
value_to_set = value
if final_key in CONVERSIONS and isinstance(value_to_set, str):
try:
value_to_set = CONVERSIONS[final_key](value_to_set)
except (ValueError, TypeError):
pass

setattr(current, final_key, value_to_set)

return None

Expand Down
4 changes: 2 additions & 2 deletions projects/code_genesis/PR_ARTICLE.md
Original file line number Diff line number Diff line change
Expand Up @@ -216,8 +216,8 @@ We curated a benchmark of **30 real-world project specifications** spanning prod
- **OpenCode** (open-source)

**Foundation Models**:
- **Qwen2.5-Coder-Plus** for simple projects
- **Qwen2.5-Max** for medium projects
- **Qwen3-Coder-Plus** for simple projects
- **Qwen3-Max** for medium projects

### 3.3 Evaluation Metrics

Expand Down
117 changes: 101 additions & 16 deletions webui/backend/agent_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -408,6 +408,20 @@ async def _read_output(self):
if self._waiting_for_input:
# Check if process is still alive
if self.process.returncode is None:
# Flush any pending chat response before waiting
if self._is_chat_mode:
self._flush_chat_response()
# Send waiting_input message to enable frontend input
if self.on_output and not self._waiting_input_sent:
self.on_output({
'type': 'waiting_input',
'content': '',
'role': 'system',
'metadata': {
'waiting': True
}
})
self._waiting_input_sent = True
Comment on lines +411 to +424
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

This block of logic for handling the "waiting for input" state is duplicated at lines 457-470. To improve maintainability and avoid future inconsistencies, consider extracting this logic into a private helper method, for example _handle_waiting_for_input().

# Process is still alive, continue waiting
continue
else:
Expand Down Expand Up @@ -440,6 +454,20 @@ async def _read_output(self):
if self._waiting_for_input:
# Check if process is still alive
if self.process.returncode is None:
# Flush any pending chat response before waiting
if self._is_chat_mode:
self._flush_chat_response()
# Send waiting_input message to enable frontend input
if self.on_output and not self._waiting_input_sent:
self.on_output({
'type': 'waiting_input',
'content': '',
'role': 'system',
'metadata': {
'waiting': True
}
})
self._waiting_input_sent = True
print(
'[Runner] Agent is waiting for user input, keeping process alive...'
)
Expand Down Expand Up @@ -609,35 +637,91 @@ def _clean_log_prefix(text: str) -> str:
return text.strip()

async def _process_chat_line(self, line: str):
# NOTE(review): this span was captured from a unified diff and appears to
# interleave removed (old) and added (new) lines -- confirm against the
# real file before relying on it. The two string literals below look like
# the old/new docstring pair from that diff.
"""Simple chat mode - send response and wait for next input"""
# Detect [assistant]: marker - next lines will be the response
"""Simple chat mode - handle assistant output, tool calls, and tool results"""
# Strip any log prefix once up front; reused by every branch below.
cleaned = self._clean_log_prefix(line)

# Detect [tool_calling]: marker - flush assistant output and start collecting tool call
if '[tool_calling]:' in line:
self._flush_chat_response()
self._collecting_tool_call = True
self._tool_call_json_buffer = ''
return

# Collect tool call JSON
if self._collecting_tool_call:
if cleaned:
if self._tool_call_json_buffer:
self._tool_call_json_buffer += '\n' + cleaned
else:
self._tool_call_json_buffer = cleaned
# Check if we have a complete JSON object
# NOTE(review): assumes the tool-call JSON is pretty-printed with the
# closing brace on its own line -- confirm the emitter's format.
if cleaned == '}' and self._tool_call_json_buffer.strip(
).startswith('{'):
self._flush_tool_call()
return

# Detect tool execution result (success or error)
# NOTE(review): substring match on 'error' is a heuristic; a success line
# containing the word "error" would be misflagged -- verify log format.
if 'execute tool call' in line:
if self.on_output:
is_error = 'error' in line.lower()
self.on_output({
'type': 'tool_result',
'content': cleaned,
'role': 'assistant',
'metadata': {
'is_error': is_error
}
})
return

# Detect [assistant]: marker - start collecting
if '[assistant]:' in line:
self._flush_chat_response()
self._collecting_assistant_output = True
self._chat_response_buffer = ''
return

# If collecting, send content immediately as complete
# Detect end markers - flush assistant output
end_markers = ['[user]:']
for marker in end_markers:
if marker in line:
self._flush_chat_response()
return

# If collecting assistant output, accumulate the content
if self._collecting_assistant_output:
# NOTE(review): 'cleaned' was already computed at the top of this
# method; the reassignment below looks like a leftover old diff line.
cleaned = self._clean_log_prefix(line)
if cleaned:
if self._chat_response_buffer:
self._chat_response_buffer += '\n' + cleaned
else:
self._chat_response_buffer = cleaned
# NOTE(review): the "send immediately" block below contradicts the
# accumulate-then-flush design documented above; it is probably the
# removed (old) side of the diff -- confirm against the real file.
# Send immediately with done=true (non-streaming mode)
print(
f'[Runner] Chat response: {len(self._chat_response_buffer)} chars'
)
if self.on_output:
self.on_output({
'type': 'stream',
'content': self._chat_response_buffer,
'role': 'assistant',
'done': True
})
# Mark as waiting for input - process is still running
self._waiting_for_input = True

def _flush_tool_call(self):
"""Send tool call information to frontend"""
if self._is_chat_mode and self._tool_call_json_buffer.strip(
) and self.on_output:
try:
import json
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

It's a best practice in Python to place all imports at the top of the file. This improves readability and helps avoid issues like circular dependencies. Please move import json to the top of webui/backend/agent_runner.py.

tool_data = json.loads(self._tool_call_json_buffer)
tool_name = tool_data.get('tool_name', 'unknown')
print(f'[Runner] Tool call: {tool_name}')
self.on_output({
'type': 'tool_call',
'content': '',
'role': 'assistant',
'metadata': {
'tool_name': tool_name,
'arguments': tool_data.get('arguments', {}),
'id': tool_data.get('id', '')
}
})
except json.JSONDecodeError:
print('[Runner] Failed to parse tool call JSON')
self._tool_call_json_buffer = ''
self._collecting_tool_call = False

def _flush_chat_response(self):
"""Send final chat response with done=True"""
if self._is_chat_mode and self._chat_response_buffer.strip(
Expand All @@ -652,7 +736,8 @@ def _flush_chat_response(self):
'done': True
})
self._chat_response_buffer = ''
self._collecting_assistant_output = False
# Don't reset _collecting_assistant_output here - more content may come
# It will be reset when we see [tool_calling]: or [user]: or process exits

async def _process_line(self, line: str):
"""Process a line of output"""
Expand Down
7 changes: 6 additions & 1 deletion webui/backend/project_discovery.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,9 @@
class ProjectDiscovery:
"""Discovers and manages projects from the ms-agent projects directory"""

# Whitelist of projects to show in the UI
VISIBLE_PROJECTS = {'code_genesis', 'singularity_cinema'}
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

Hardcoding the VISIBLE_PROJECTS whitelist in the source code can make it difficult to update without code changes and redeployment. For better flexibility and maintainability, consider moving this list to a configuration file (e.g., a JSON or YAML file).


def __init__(self, projects_dir: str):
self.projects_dir = projects_dir
self._projects_cache: Optional[List[Dict[str, Any]]] = None
Expand All @@ -28,7 +31,9 @@ def discover_projects(self,

for item in os.listdir(self.projects_dir):
item_path = os.path.join(self.projects_dir, item)
if os.path.isdir(item_path) and not item.startswith('.'):
# Only show projects in the whitelist
if os.path.isdir(item_path) and not item.startswith(
'.') and item in self.VISIBLE_PROJECTS:
project_info = self._analyze_project(item, item_path)
if project_info:
projects.append(project_info)
Expand Down
9 changes: 8 additions & 1 deletion webui/backend/websocket_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,7 +147,14 @@ async def start_agent(session_id: str, data: Dict[str, Any],
import ms_agent
from pathlib import Path
# Get ms_agent package installation path
ms_agent_package_path = Path(ms_agent.__file__).parent
# Use __path__ which is always available for packages and gives real filesystem paths
if hasattr(ms_agent, '__path__') and ms_agent.__path__:
ms_agent_package_path = Path(ms_agent.__path__[0])
elif ms_agent.__file__ is not None:
ms_agent_package_path = Path(ms_agent.__file__).parent
else:
raise RuntimeError('Cannot determine ms_agent package path. '
'Please ensure ms_agent is properly installed.')
chat_config_path = ms_agent_package_path / 'agent' / 'agent.yaml'

project = {
Expand Down
Loading
Loading