diff --git a/.env.example b/.env.example index a85b5341..c85d415e 100644 --- a/.env.example +++ b/.env.example @@ -1,5 +1,21 @@ # Claude Code Telegram Bot Configuration +# === AUTHENTICATION SETUP === +# Choose one of these Claude authentication methods: +# +# Option 1 (Recommended): Use existing Claude CLI authentication +# 1. Install Claude CLI: https://claude.ai/code +# 2. Login: claude auth login +# 3. Set USE_SDK=true (leave ANTHROPIC_API_KEY empty) +# +# Option 2: Direct API key +# 1. Get API key from: https://console.anthropic.com/ +# 2. Set USE_SDK=true and ANTHROPIC_API_KEY=your-key +# +# Option 3: CLI subprocess mode (legacy) +# 1. Install and authenticate Claude CLI +# 2. Set USE_SDK=false + # === REQUIRED SETTINGS === # Telegram Bot Token from @BotFather TELEGRAM_BOT_TOKEN=your_bot_token_here @@ -23,6 +39,17 @@ ENABLE_TOKEN_AUTH=false AUTH_TOKEN_SECRET= # === CLAUDE SETTINGS === +# Integration method: Use Python SDK (true) or CLI subprocess (false) +USE_SDK=true + +# Anthropic API key for SDK integration (optional if using CLI authentication) +# Get your API key from: https://console.anthropic.com/ +ANTHROPIC_API_KEY= + +# Path to Claude CLI executable (optional - will auto-detect if not specified) +# Example: /usr/local/bin/claude or ~/.nvm/versions/node/v20.19.2/bin/claude +CLAUDE_CLI_PATH= + # Maximum conversation turns before requiring new session CLAUDE_MAX_TURNS=10 @@ -32,6 +59,9 @@ CLAUDE_TIMEOUT_SECONDS=300 # Maximum cost per user in USD CLAUDE_MAX_COST_PER_USER=10.0 +# Allowed Claude tools (comma-separated list) +CLAUDE_ALLOWED_TOOLS=Read,Write,Edit,Bash,Glob,Grep,LS,Task,MultiEdit,NotebookRead,NotebookEdit,WebFetch,TodoRead,TodoWrite,WebSearch + # === RATE LIMITING === # Number of requests allowed per window RATE_LIMIT_REQUESTS=10 diff --git a/FEATURE_IMPLEMENTATION_SUMMARY.md b/FEATURE_IMPLEMENTATION_SUMMARY.md new file mode 100644 index 00000000..bce9f891 --- /dev/null +++ b/FEATURE_IMPLEMENTATION_SUMMARY.md @@ -0,0 +1,214 @@ +# Advanced 
Features Implementation Summary + +## Overview +This document summarizes the implementation of advanced features for the Claude Code Telegram Bot as defined in TODO-7. + +## Implemented Features + +### 1. Enhanced File Upload Handling (`src/bot/features/file_handler.py`) +- **Multi-file Support**: Handles various file types (code, text, archives) +- **Archive Extraction**: Safely extracts and analyzes zip/tar files with security checks +- **Code Analysis**: Comprehensive codebase analysis with language detection, framework identification, and project structure visualization +- **Security**: File size limits, zip bomb prevention, path traversal protection + +**Key Classes:** +- `FileHandler`: Main handler for file operations +- `ProcessedFile`: Result dataclass for processed files +- `CodebaseAnalysis`: Comprehensive analysis results + +### 2. Git Integration (`src/bot/features/git_integration.py`) +- **Safe Git Operations**: Only allows read-only git commands (status, log, diff, etc.) +- **Repository Status**: Shows branch, changes, ahead/behind tracking +- **Diff Viewing**: Formatted diff output with emoji indicators +- **Commit History**: File-specific commit history with metadata +- **Security**: Command validation, path restrictions + +**Key Classes:** +- `GitIntegration`: Main git operations handler +- `GitStatus`: Repository status information +- `CommitInfo`: Individual commit details + +### 3. Quick Actions System (`src/bot/features/quick_actions.py`) +- **Predefined Actions**: Test, install, format, lint, security, optimize, document, refactor +- **Context-Aware**: Actions filtered based on project context (package files, test frameworks, etc.) +- **Extensible**: Easy to add new actions +- **Integration**: Executes actions through Claude Code + +**Key Classes:** +- `QuickActionManager`: Manages and executes quick actions +- `QuickAction`: Individual action definition + +### 4. 
Session Export (`src/bot/features/session_export.py`) +- **Multiple Formats**: Markdown, JSON, HTML export options +- **Rich Formatting**: Styled HTML output with syntax highlighting +- **Session Metadata**: Includes timestamps, costs, session info +- **File Generation**: Creates downloadable files through Telegram + +**Key Classes:** +- `SessionExporter`: Handles session export in various formats +- `ExportedSession`: Export result with metadata +- `ExportFormat`: Supported export format enumeration + +### 5. Image/Screenshot Support (`src/bot/features/image_handler.py`) +- **Image Processing**: Handles common image formats (PNG, JPG, GIF, etc.) +- **Type Detection**: Identifies screenshots, diagrams, UI mockups +- **Context-Aware Prompts**: Generates appropriate analysis prompts based on image type +- **Future-Ready**: Base64 encoding for future Claude vision API support + +**Key Classes:** +- `ImageHandler`: Main image processing handler +- `ProcessedImage`: Processed image result with prompt and metadata + +### 6. Conversation Enhancements (`src/bot/features/conversation_mode.py`) +- **Follow-up Suggestions**: Context-aware suggestions based on tools used and content +- **Context Preservation**: Maintains conversation state across messages +- **Smart Triggers**: Shows suggestions only when relevant +- **Interactive Keyboards**: Easy-to-use suggestion buttons + +**Key Classes:** +- `ConversationEnhancer`: Manages conversation flow and suggestions +- `ConversationContext`: Maintains conversation state + +### 7. 
Feature Registry (`src/bot/features/registry.py`) +- **Centralized Management**: Single point for all feature initialization +- **Configuration-Driven**: Features enabled/disabled based on settings +- **Graceful Degradation**: Handles missing dependencies gracefully +- **Lifecycle Management**: Proper startup and shutdown handling + +## Integration Points + +### Bot Core Integration (`src/bot/core.py`) +- Feature registry initialization during bot startup +- Feature registry added to dependency injection +- New commands registered: `/actions`, `/git` +- Graceful shutdown with feature cleanup + +### Command Handlers (`src/bot/handlers/command.py`) +- **New Commands**: + - `/actions`: Shows context-aware quick actions + - `/git`: Git repository information and operations + - Enhanced `/export`: Session export with format selection +- **Updated Help**: Comprehensive help text with new features + +### Callback Handlers (`src/bot/handlers/callback.py`) +- **New Callback Routes**: + - `quick:*`: Quick action execution + - `git:*`: Git operations (status, diff, log) + - `export:*`: Session export format selection + - `followup:*`: Follow-up suggestion handling +- **Enhanced Error Handling**: Better user feedback for feature errors + +### Message Handlers (`src/bot/handlers/message.py`) +- **Enhanced File Processing**: Uses new FileHandler for improved file analysis +- **Image Support**: Processes images with new ImageHandler +- **Conversation Flow**: Adds follow-up suggestions after Claude responses +- **Fallback Support**: Graceful degradation when features unavailable + +## Configuration + +### Feature Flags +All features respect existing configuration flags: +- `enable_file_uploads`: Controls enhanced file handling +- `enable_git_integration`: Controls git operations +- `enable_quick_actions`: Controls quick action system + +### Always-Enabled Features +- Session export (uses existing storage) +- Image handling (basic support) +- Conversation enhancements (improves UX) 
+ +## Security Considerations + +### File Handling Security +- Archive bomb prevention (100MB limit) +- Path traversal protection +- File type validation +- Temporary file cleanup + +### Git Security +- Read-only operations only +- Command validation whitelist +- Path restriction to approved directory +- No write operations (commit, push, etc.) + +### Input Validation +- All user inputs validated +- Callback data validation +- File size and type restrictions +- Error message sanitization + +## Testing Status + +### Syntax Validation +- ✅ All feature files pass Python syntax validation +- ✅ Import validation successful +- ✅ Code formatting with Black/isort + +### Integration Testing +- ✅ Features integrate with existing bot core +- ✅ Dependency injection working +- ✅ Graceful degradation tested + +### Coverage +- New features included in coverage reports +- Existing functionality remains intact +- No breaking changes to current API + +## Usage Examples + +### Quick Actions +``` +/actions +# Shows context-aware actions based on current directory +# Actions like "Run Tests" only appear if test framework detected +``` + +### Git Integration +``` +/git +# Shows repository status, recent commits, changes +# Buttons for diff view, commit log, etc. +``` + +### Session Export +``` +/export +# Shows format selection (Markdown, HTML, JSON) +# Generates downloadable file with conversation history +``` + +### Enhanced File Upload +- Upload zip files → automatic extraction and analysis +- Upload code files → enhanced analysis with language detection +- Upload images → context-aware analysis prompts + +### Conversation Flow +- After Claude responses → smart follow-up suggestions +- Context-aware suggestions based on tools used +- One-click action execution + +## Future Enhancements + +### Planned Improvements +1. **Image Vision API**: Full image analysis when Claude gains vision capabilities +2. **Custom Actions**: User-defined quick actions +3. 
**Session Templates**: Reusable session configurations +4. **Advanced Git**: Selective file operations, branch management +5. **Plugin System**: Third-party feature extensions + +### Architecture Ready For +- Additional export formats (PDF, Word) +- More git operations (when security permits) +- Advanced file processing (compilation, analysis) +- Multi-language code execution +- Integration with external tools + +## Conclusion + +The advanced features implementation successfully extends the Claude Code Telegram Bot with: +- **Enhanced User Experience**: Better file handling, quick actions, conversation flow +- **Developer Productivity**: Git integration, code analysis, session export +- **Robust Architecture**: Modular design, graceful degradation, security-first +- **Future-Proof Design**: Extensible, configurable, maintainable + +All features are production-ready and integrate seamlessly with the existing codebase while maintaining backward compatibility and security standards. \ No newline at end of file diff --git a/README.md b/README.md index 8d8e0ba4..5032e7e6 100644 --- a/README.md +++ b/README.md @@ -107,7 +107,30 @@ The following features are partially implemented or planned: 3. Save your bot token (it looks like `1234567890:ABC...`) 4. Note your bot username (e.g., `my_claude_bot`) -### 2. Install the Bot +### 2. Set Up Claude Authentication + +Choose one of these authentication methods: + +**Option 1: Use existing Claude CLI login (Recommended)** +```bash +# Install Claude CLI +# Follow instructions at https://claude.ai/code + +# Authenticate with Claude +claude + +# follow the prompts to authenticate + +# The bot will automatically use your CLI credentials +``` + +**Option 2: Use API key directly** +```bash +# Get your API key from https://console.anthropic.com/ +# You'll add this to your .env file in the next step +``` + +### 3. 
Install the Bot ```bash # Clone the repository @@ -121,7 +144,7 @@ curl -sSL https://install.python-poetry.org | python3 - make dev ``` -### 3. Configure Environment +### 4. Configure Environment ```bash # Copy the example configuration @@ -139,7 +162,7 @@ APPROVED_DIRECTORY=/Users/yourname/projects ALLOWED_USERS=123456789 # Your Telegram user ID ``` -### 4. Run the Bot +### 5. Run the Bot ```bash # Start in debug mode @@ -151,6 +174,8 @@ make run 🎉 **That's it!** Message your bot on Telegram to get started. +> 📋 **Detailed Setup Guide**: For comprehensive setup instructions including authentication options and troubleshooting, see [docs/setup.md](docs/setup.md) + ## 📱 Usage ### Basic Commands @@ -250,6 +275,8 @@ ALLOWED_USERS=123456789,987654321 # Your Telegram user ID(s) ```bash # Claude Settings +USE_SDK=true # Use Python SDK (default) or CLI subprocess +ANTHROPIC_API_KEY=sk-ant-api03-... # Optional: API key for SDK (if not using CLI auth) CLAUDE_MAX_COST_PER_USER=10.0 # Max cost per user in USD CLAUDE_TIMEOUT_SECONDS=300 # Timeout for operations CLAUDE_ALLOWED_TOOLS="Read,Write,Edit,Bash,Glob,Grep,LS,Task,MultiEdit,NotebookRead,NotebookEdit,WebFetch,TodoRead,TodoWrite,WebSearch" @@ -294,10 +321,22 @@ To get your Telegram user ID for the `ALLOWED_USERS` setting: - ✅ Check that paths don't contain special characters **Claude integration not working:** -- ✅ Verify Claude Code CLI is installed: `claude --version` -- ✅ Check if you're authenticated: `claude auth status` -- ✅ Ensure you have API credits available + +*If using SDK mode (USE_SDK=true, which is default):* +- ✅ Check CLI authentication: `claude auth status` +- ✅ If no CLI auth, verify `ANTHROPIC_API_KEY` is set in .env +- ✅ Ensure API key has sufficient credits +- ✅ Check logs for "SDK initialization" messages + +*If using CLI mode (USE_SDK=false):* +- ✅ Verify Claude CLI is installed: `claude --version` +- ✅ Check CLI authentication: `claude auth status` +- ✅ Ensure CLI has sufficient credits + 
+*General troubleshooting:* - ✅ Verify `CLAUDE_ALLOWED_TOOLS` includes necessary tools +- ✅ Check `CLAUDE_TIMEOUT_SECONDS` isn't too low +- ✅ Monitor usage with `/status` command **High usage costs:** - ✅ Adjust `CLAUDE_MAX_COST_PER_USER` to set spending limits diff --git a/docs/configuration.md b/docs/configuration.md index f3571f12..534af3e7 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -52,6 +52,10 @@ AUTH_TOKEN_SECRET=your-secret-key-here #### Claude Configuration ```bash +# Integration Method +USE_SDK=true # Use Python SDK (default) or CLI subprocess +ANTHROPIC_API_KEY=sk-ant-api03-... # Optional: API key for SDK integration + # Maximum conversation turns before requiring new session CLAUDE_MAX_TURNS=10 @@ -60,6 +64,9 @@ CLAUDE_TIMEOUT_SECONDS=300 # Maximum cost per user in USD CLAUDE_MAX_COST_PER_USER=10.0 + +# Allowed Claude tools (comma-separated list) +CLAUDE_ALLOWED_TOOLS=Read,Write,Edit,Bash,Glob,Grep,LS,Task,MultiEdit,NotebookRead,NotebookEdit,WebFetch,TodoRead,TodoWrite,WebSearch ``` #### Rate Limiting @@ -319,6 +326,48 @@ This will show detailed logging of configuration loading and validation. - **Restrict `APPROVED_DIRECTORY`** to only necessary paths - **Monitor logs** for configuration errors and security events +## Claude Integration Options + +### SDK vs CLI Mode + +The bot supports two integration methods with Claude: + +1. **SDK Mode (Default)**: Uses the Claude Code Python SDK for direct API integration + - Better performance and streaming support + - Can use existing Claude CLI authentication or API key + - More reliable error handling + +2. 
**CLI Mode**: Uses Claude Code CLI subprocess + - Requires Claude Code CLI installation + - Uses CLI authentication only + - Legacy mode for compatibility + +### Authentication Options + +#### Option 1: Use Existing Claude CLI Authentication (Recommended) +```bash +# Install and authenticate Claude CLI +claude auth login + +# Configure bot to use SDK with CLI auth +USE_SDK=true +# No ANTHROPIC_API_KEY needed - SDK will use CLI credentials +``` + +#### Option 2: Direct API Key +```bash +# Configure bot with API key +USE_SDK=true +ANTHROPIC_API_KEY=sk-ant-api03-your-key-here +``` + +#### Option 3: CLI Mode (Legacy) +```bash +# Use CLI subprocess instead of SDK +USE_SDK=false +# Requires Claude CLI to be installed and authenticated +``` + ## Example .env File ```bash @@ -334,6 +383,10 @@ ALLOWED_USERS=123456789,987654321 ENABLE_TOKEN_AUTH=false AUTH_TOKEN_SECRET= +# Claude Integration +USE_SDK=true # Use Python SDK (recommended) +ANTHROPIC_API_KEY= # Optional: Only if not using CLI auth + # Rate Limiting RATE_LIMIT_REQUESTS=10 RATE_LIMIT_WINDOW=60 @@ -341,6 +394,7 @@ RATE_LIMIT_WINDOW=60 # Claude Settings CLAUDE_MAX_COST_PER_USER=10.0 CLAUDE_TIMEOUT_SECONDS=300 +CLAUDE_ALLOWED_TOOLS=Read,Write,Edit,Bash,Glob,Grep,LS,Task,MultiEdit,NotebookRead,NotebookEdit,WebFetch,TodoRead,TodoWrite,WebSearch # Storage & Database DATABASE_URL=sqlite:///data/bot.db diff --git a/docs/development.md b/docs/development.md index 06de1a15..859f78f9 100644 --- a/docs/development.md +++ b/docs/development.md @@ -9,6 +9,9 @@ This document provides detailed information for developers working on the Claude - Python 3.9 or higher - Poetry for dependency management - Git for version control +- Claude authentication (one of): + - Claude Code CLI installed and authenticated + - Anthropic API key for direct SDK usage ### Initial Setup @@ -329,6 +332,12 @@ TELEGRAM_BOT_TOKEN=test_token_for_development TELEGRAM_BOT_USERNAME=test_bot APPROVED_DIRECTORY=/path/to/your/test/projects +# Claude Integration 
(choose one authentication method) +USE_SDK=true # Use SDK (recommended for development) +# Option 1: Use existing Claude CLI auth (no API key needed) +# Option 2: Direct API key +# ANTHROPIC_API_KEY=sk-ant-api03-your-development-key + # Development settings DEBUG=true DEVELOPMENT_MODE=true diff --git a/docs/setup.md b/docs/setup.md new file mode 100644 index 00000000..3199554a --- /dev/null +++ b/docs/setup.md @@ -0,0 +1,365 @@ +# Setup and Installation Guide + +This guide provides comprehensive instructions for setting up the Claude Code Telegram Bot with both CLI and SDK integration modes. + +## Quick Start + +### 1. Prerequisites + +- **Python 3.9+** - [Download here](https://www.python.org/downloads/) +- **Poetry** - Modern Python dependency management +- **Telegram Bot Token** - Get one from [@BotFather](https://t.me/botfather) +- **Claude Authentication** - Choose one method below + +### 2. Claude Authentication Setup + +The bot supports two Claude integration modes. Choose the one that fits your needs: + +#### Option A: SDK with CLI Authentication (Recommended) + +This method uses the Python SDK for better performance while leveraging your existing Claude CLI authentication. + +```bash +# 1. Install Claude CLI +# Visit https://claude.ai/code and follow installation instructions + +# 2. Authenticate with Claude +claude auth login + +# 3. Verify authentication +claude auth status +# Should show: "✓ You are authenticated" + +# 4. Configure bot (in step 4 below) +USE_SDK=true +# Leave ANTHROPIC_API_KEY empty - SDK will use CLI credentials +``` + +**Pros:** +- Best performance with native async support +- Uses your existing Claude CLI authentication +- Better streaming and error handling +- No need to manage API keys separately + +**Cons:** +- Requires Claude CLI installation + +#### Option B: SDK with Direct API Key + +This method uses the Python SDK with a direct API key, bypassing the need for Claude CLI. + +```bash +# 1. 
Get your API key from https://console.anthropic.com/ +# 2. Configure bot (in step 4 below) +USE_SDK=true +ANTHROPIC_API_KEY=sk-ant-api03-your-key-here +``` + +**Pros:** +- No Claude CLI installation required +- Direct API integration +- Good performance with async support + +**Cons:** +- Need to manage API keys manually +- API key management and rotation + +#### Option C: CLI Subprocess Mode (Legacy) + +This method uses the Claude CLI as a subprocess. Use this only if you need compatibility with older setups. + +```bash +# 1. Install Claude CLI +# Visit https://claude.ai/code and follow installation instructions + +# 2. Authenticate with Claude +claude auth login + +# 3. Configure bot (in step 4 below) +USE_SDK=false +# ANTHROPIC_API_KEY not needed for CLI mode +``` + +**Pros:** +- Uses official Claude CLI +- Compatible with all CLI features + +**Cons:** +- Slower than SDK integration +- Subprocess overhead +- Less reliable error handling + +### 3. Install the Bot + +```bash +# Clone the repository +git clone https://github.com/yourusername/claude-code-telegram.git +cd claude-code-telegram + +# Install Poetry (if needed) +curl -sSL https://install.python-poetry.org | python3 - + +# Install dependencies +make dev +``` + +### 4. Configure Environment + +```bash +# Copy the example configuration +cp .env.example .env + +# Edit with your settings +nano .env +``` + +**Required Configuration:** + +```bash +# Telegram Bot Settings +TELEGRAM_BOT_TOKEN=1234567890:ABC-DEF1234ghIkl-zyx57W2v1u123ew11 +TELEGRAM_BOT_USERNAME=your_bot_username + +# Security +APPROVED_DIRECTORY=/path/to/your/projects +ALLOWED_USERS=123456789 # Your Telegram user ID + +# Claude Integration (choose based on your authentication method above) +USE_SDK=true # true for SDK, false for CLI +ANTHROPIC_API_KEY= # Only needed for Option B above +``` + +### 5. Get Your Telegram User ID + +To configure `ALLOWED_USERS`: + +1. Message [@userinfobot](https://t.me/userinfobot) on Telegram +2. 
It will reply with your user ID number +3. Add this number to your `ALLOWED_USERS` setting + +### 6. Run the Bot + +```bash +# Start in debug mode (recommended for first run) +make run-debug + +# Or for production +make run +``` + +### 7. Test the Bot + +1. Find your bot on Telegram (search for your bot username) +2. Send `/start` to begin +3. Try a simple command like `/pwd` or `/ls` +4. Test Claude integration with a simple question + +## Advanced Configuration + +### Authentication Methods Comparison + +| Feature | SDK + CLI Auth | SDK + API Key | CLI Subprocess | +|---------|----------------|---------------|----------------| +| Performance | ✅ Best | ✅ Best | ❌ Slower | +| Setup Complexity | 🟡 Medium | ✅ Easy | 🟡 Medium | +| CLI Required | ✅ Yes | ❌ No | ✅ Yes | +| API Key Management | ❌ No | ✅ Yes | ❌ No | +| Streaming Support | ✅ Yes | ✅ Yes | 🟡 Limited | +| Error Handling | ✅ Best | ✅ Best | 🟡 Basic | + +### Security Considerations + +#### Directory Isolation +```bash +# Set this to a specific project directory, not your home directory +APPROVED_DIRECTORY=/Users/yourname/projects + +# The bot can only access files within this directory +# This prevents access to sensitive system files +``` + +#### User Access Control +```bash +# Option 1: Whitelist specific users (recommended) +ALLOWED_USERS=123456789,987654321 + +# Option 2: Token-based authentication +ENABLE_TOKEN_AUTH=true +AUTH_TOKEN_SECRET=your-secret-key-here # Generate with: openssl rand -hex 32 +``` + +### Rate Limiting Configuration + +```bash +# Prevent abuse with rate limiting +RATE_LIMIT_REQUESTS=10 # Requests per window +RATE_LIMIT_WINDOW=60 # Window in seconds +RATE_LIMIT_BURST=20 # Burst capacity + +# Cost-based limiting +CLAUDE_MAX_COST_PER_USER=10.0 # Max cost per user in USD +``` + +### Development Setup + +For development work: + +```bash +# Development-specific settings +DEBUG=true +DEVELOPMENT_MODE=true +LOG_LEVEL=DEBUG +ENVIRONMENT=development + +# More lenient rate limits for testing 
+RATE_LIMIT_REQUESTS=100 +CLAUDE_TIMEOUT_SECONDS=600 +``` + +## Troubleshooting + +### Common Setup Issues + +#### Bot doesn't respond +```bash +# Check your bot token +echo $TELEGRAM_BOT_TOKEN + +# Verify user ID is correct +# Message @userinfobot to get your ID + +# Check bot logs +make run-debug +``` + +#### Claude authentication issues + +**For SDK + CLI Auth:** +```bash +# Check CLI authentication +claude auth status + +# Should show: "✓ You are authenticated" +# If not, run: claude auth login +``` + +**For SDK + API Key:** +```bash +# Verify API key is set +echo $ANTHROPIC_API_KEY + +# Should start with: sk-ant-api03- +# Get a new key from: https://console.anthropic.com/ +``` + +**For CLI Mode:** +```bash +# Check CLI installation +claude --version + +# Check authentication +claude auth status + +# Test CLI works +claude "Hello, can you help me?" +``` + +#### Permission errors +```bash +# Check approved directory exists and is accessible +ls -la /path/to/your/projects + +# Verify bot process has read/write permissions +# The directory should be owned by the user running the bot +``` + +### Performance Optimization + +#### For SDK Mode +```bash +# Optimal settings for SDK integration +USE_SDK=true +CLAUDE_TIMEOUT_SECONDS=300 +CLAUDE_MAX_TURNS=20 +``` + +#### For CLI Mode +```bash +# If you must use CLI mode, optimize these settings +USE_SDK=false +CLAUDE_TIMEOUT_SECONDS=450 # Higher timeout for subprocess overhead +CLAUDE_MAX_TURNS=10 # Lower turns to reduce subprocess calls +``` + +### Monitoring and Logging + +#### Enable detailed logging +```bash +LOG_LEVEL=DEBUG +DEBUG=true + +# Run with debug output +make run-debug +``` + +#### Monitor usage and costs +```bash +# Check usage in Telegram +/status + +# Monitor logs for cost tracking +tail -f logs/bot.log | grep -i cost +``` + +## Production Deployment + +### Environment-specific settings + +```bash +# Production configuration +ENVIRONMENT=production +DEBUG=false +LOG_LEVEL=INFO +DEVELOPMENT_MODE=false + +# 
Stricter rate limits +RATE_LIMIT_REQUESTS=5 +CLAUDE_MAX_COST_PER_USER=5.0 +SESSION_TIMEOUT_HOURS=12 + +# Enable monitoring +ENABLE_TELEMETRY=true +SENTRY_DSN=https://your-sentry-dsn@sentry.io/project +``` + +### Database configuration + +```bash +# For production, use a persistent database location +DATABASE_URL=sqlite:///var/lib/claude-telegram/bot.db + +# Or use PostgreSQL for high-scale deployments +# DATABASE_URL=postgresql://user:pass@localhost/claude_telegram +``` + +### Security hardening + +```bash +# Enable token authentication for additional security +ENABLE_TOKEN_AUTH=true +AUTH_TOKEN_SECRET=your-very-secure-secret-key + +# Restrict to specific users only +ALLOWED_USERS=123456789,987654321 + +# Use a restricted project directory +APPROVED_DIRECTORY=/opt/projects +``` + +## Getting Help + +- **Documentation**: Check the main [README.md](../README.md) +- **Configuration**: See [configuration.md](configuration.md) for all options +- **Development**: See [development.md](development.md) for development setup +- **Issues**: [Open an issue](https://github.com/yourusername/claude-code-telegram/issues) +- **Security**: See [SECURITY.md](../SECURITY.md) for security concerns \ No newline at end of file diff --git a/docs/todo-5-claude-integration.md b/docs/todo-5-claude-integration.md index acd936bd..f2536c6e 100644 --- a/docs/todo-5-claude-integration.md +++ b/docs/todo-5-claude-integration.md @@ -1,25 +1,122 @@ # TODO-5: Claude Code Integration ## Objective -Create a robust integration with Claude Code CLI that handles subprocess management, response streaming, session state, timeout handling, and output parsing while maintaining security and reliability. +Create a robust integration with Claude Code that supports both CLI subprocess execution and Python SDK integration, handling response streaming, session state, timeout handling, and output parsing while maintaining security and reliability. 
## Integration Architecture ### Component Overview ``` Claude Integration Layer -├── Process Manager (Subprocess handling) +├── SDK Integration (Python SDK - Default) +│ ├── Async SDK Client +│ ├── Streaming Support +│ ├── Authentication Manager +│ └── Tool Execution Monitoring +├── CLI Integration (Legacy subprocess) +│ ├── Process Manager (Subprocess handling) +│ ├── Output Parser (JSON/Stream parsing) +│ └── Timeout Handler (Prevent hanging) ├── Session Manager (State persistence) -├── Output Parser (JSON/Stream parsing) ├── Response Streamer (Real-time updates) -├── Timeout Handler (Prevent hanging) ├── Cost Calculator (Usage tracking) └── Tool Monitor (Track Claude's actions) ``` ## Core Implementation -### Claude Process Manager +### Integration Modes + +The bot supports two integration modes with Claude: + +#### SDK Integration (Default - Recommended) +- Uses the Claude Code Python SDK for direct API integration +- Better performance with native async support +- Reliable streaming and error handling +- Can use existing Claude CLI authentication or direct API key +- Implementation in `src/claude/sdk_integration.py` + +#### CLI Integration (Legacy) +- Uses Claude Code CLI as a subprocess +- Requires Claude CLI installation and authentication +- Legacy mode for compatibility +- Implementation in `src/claude/integration.py` + +### Claude SDK Manager +```python +# src/claude/sdk_integration.py +""" +Claude Code Python SDK integration + +Features: +- Native async support +- Streaming responses +- Direct API integration +- CLI authentication support +""" + +import asyncio +from typing import AsyncIterator, Optional, Dict, Any +from claude_code_sdk import query, ClaudeCodeOptions + +@dataclass +class ClaudeResponse: + """Response from Claude Code SDK""" + content: str + session_id: str + cost: float + duration_ms: int + num_turns: int + is_error: bool = False + error_type: Optional[str] = None + tools_used: List[Dict[str, Any]] = field(default_factory=list) + +class 
ClaudeSDKManager: + """Manage Claude Code SDK integration""" + + def __init__(self, config: Settings): + self.config = config + self.options = ClaudeCodeOptions( + api_key=config.anthropic_api_key_str, + timeout=config.claude_timeout_seconds, + working_directory=config.approved_directory + ) + + async def execute_query( + self, + prompt: str, + working_directory: Path, + session_id: Optional[str] = None, + stream_callback: Optional[Callable] = None + ) -> ClaudeResponse: + """Execute Claude query using SDK""" + + try: + # Configure options for this query + options = self.options.copy() + options.working_directory = str(working_directory) + + # Execute with streaming + async for update in query(prompt, options): + if stream_callback: + await stream_callback(update) + + # Return final response + return self._format_response(update, session_id) + + except Exception as e: + return ClaudeResponse( + content=f"Error: {str(e)}", + session_id=session_id or "unknown", + cost=0.0, + duration_ms=0, + num_turns=0, + is_error=True, + error_type=type(e).__name__ + ) +``` + +### Claude Process Manager (CLI Mode) ```python # src/claude/integration.py """ diff --git a/poetry.lock b/poetry.lock index 70cb528a..0e9b7209 100644 --- a/poetry.lock +++ b/poetry.lock @@ -43,6 +43,31 @@ files = [ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] +[[package]] +name = "anthropic" +version = "0.40.0" +description = "The official Python library for the anthropic API" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "anthropic-0.40.0-py3-none-any.whl", hash = "sha256:442028ae8790ff9e3b6f8912043918755af1230d193904ae2ef78cc22995280c"}, + {file = "anthropic-0.40.0.tar.gz", hash = "sha256:3efeca6d9e97813f93ed34322c6c7ea2279bf0824cd0aa71b59ce222665e2b87"}, +] + +[package.dependencies] +anyio = ">=3.5.0,<5" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +jiter = ">=0.4.0,<1" +pydantic = 
">=1.9.0,<3" +sniffio = "*" +typing-extensions = ">=4.7,<5" + +[package.extras] +bedrock = ["boto3 (>=1.28.57)", "botocore (>=1.31.57)"] +vertex = ["google-auth (>=2,<3)"] + [[package]] name = "anyio" version = "4.9.0" @@ -125,6 +150,25 @@ files = [ {file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"}, ] +[[package]] +name = "claude-code-sdk" +version = "0.0.11" +description = "Python SDK for Claude Code" +optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "claude_code_sdk-0.0.11-py3-none-any.whl", hash = "sha256:d0ad485db36477c290ca9a4cc08f4a7eb795daf2d7646255e2f1475c28d19a0d"}, + {file = "claude_code_sdk-0.0.11.tar.gz", hash = "sha256:1711560b0a639cd4a766298e538bdf959dcb70b4854d003753306abc83806ce9"}, +] + +[package.dependencies] +anyio = ">=4.0.0" +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} + +[package.extras] +dev = ["anyio[trio] (>=4.0.0)", "mypy (>=1.0.0)", "pytest (>=7.0.0)", "pytest-asyncio (>=0.20.0)", "pytest-cov (>=4.0.0)", "ruff (>=0.1.0)"] + [[package]] name = "click" version = "8.1.8" @@ -236,6 +280,18 @@ tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.1 [package.extras] toml = ["tomli ; python_full_version <= \"3.11.0a6\""] +[[package]] +name = "distro" +version = "1.9.0" +description = "Distro - an OS platform information API" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, + {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, +] + [[package]] name = "exceptiongroup" version = "1.3.0" @@ -243,7 +299,7 @@ description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" groups = ["main", "dev"] -markers = "python_version < \"3.11\"" +markers = 
"python_version == \"3.10\"" files = [ {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"}, {file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"}, @@ -374,6 +430,93 @@ files = [ colors = ["colorama"] plugins = ["setuptools"] +[[package]] +name = "jiter" +version = "0.10.0" +description = "Fast iterable JSON parser." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "jiter-0.10.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:cd2fb72b02478f06a900a5782de2ef47e0396b3e1f7d5aba30daeb1fce66f303"}, + {file = "jiter-0.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:32bb468e3af278f095d3fa5b90314728a6916d89ba3d0ffb726dd9bf7367285e"}, + {file = "jiter-0.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa8b3e0068c26ddedc7abc6fac37da2d0af16b921e288a5a613f4b86f050354f"}, + {file = "jiter-0.10.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:286299b74cc49e25cd42eea19b72aa82c515d2f2ee12d11392c56d8701f52224"}, + {file = "jiter-0.10.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6ed5649ceeaeffc28d87fb012d25a4cd356dcd53eff5acff1f0466b831dda2a7"}, + {file = "jiter-0.10.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2ab0051160cb758a70716448908ef14ad476c3774bd03ddce075f3c1f90a3d6"}, + {file = "jiter-0.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03997d2f37f6b67d2f5c475da4412be584e1cec273c1cfc03d642c46db43f8cf"}, + {file = "jiter-0.10.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c404a99352d839fed80d6afd6c1d66071f3bacaaa5c4268983fc10f769112e90"}, + {file = "jiter-0.10.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:66e989410b6666d3ddb27a74c7e50d0829704ede652fd4c858e91f8d64b403d0"}, + {file = 
"jiter-0.10.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b532d3af9ef4f6374609a3bcb5e05a1951d3bf6190dc6b176fdb277c9bbf15ee"}, + {file = "jiter-0.10.0-cp310-cp310-win32.whl", hash = "sha256:da9be20b333970e28b72edc4dff63d4fec3398e05770fb3205f7fb460eb48dd4"}, + {file = "jiter-0.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:f59e533afed0c5b0ac3eba20d2548c4a550336d8282ee69eb07b37ea526ee4e5"}, + {file = "jiter-0.10.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:3bebe0c558e19902c96e99217e0b8e8b17d570906e72ed8a87170bc290b1e978"}, + {file = "jiter-0.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:558cc7e44fd8e507a236bee6a02fa17199ba752874400a0ca6cd6e2196cdb7dc"}, + {file = "jiter-0.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d613e4b379a07d7c8453c5712ce7014e86c6ac93d990a0b8e7377e18505e98d"}, + {file = "jiter-0.10.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f62cf8ba0618eda841b9bf61797f21c5ebd15a7a1e19daab76e4e4b498d515b2"}, + {file = "jiter-0.10.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:919d139cdfa8ae8945112398511cb7fca58a77382617d279556b344867a37e61"}, + {file = "jiter-0.10.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13ddbc6ae311175a3b03bd8994881bc4635c923754932918e18da841632349db"}, + {file = "jiter-0.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c440ea003ad10927a30521a9062ce10b5479592e8a70da27f21eeb457b4a9c5"}, + {file = "jiter-0.10.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc347c87944983481e138dea467c0551080c86b9d21de6ea9306efb12ca8f606"}, + {file = "jiter-0.10.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:13252b58c1f4d8c5b63ab103c03d909e8e1e7842d302473f482915d95fefd605"}, + {file = "jiter-0.10.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7d1bbf3c465de4a24ab12fb7766a0003f6f9bce48b8b6a886158c4d569452dc5"}, + {file = 
"jiter-0.10.0-cp311-cp311-win32.whl", hash = "sha256:db16e4848b7e826edca4ccdd5b145939758dadf0dc06e7007ad0e9cfb5928ae7"}, + {file = "jiter-0.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c9c1d5f10e18909e993f9641f12fe1c77b3e9b533ee94ffa970acc14ded3812"}, + {file = "jiter-0.10.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1e274728e4a5345a6dde2d343c8da018b9d4bd4350f5a472fa91f66fda44911b"}, + {file = "jiter-0.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7202ae396446c988cb2a5feb33a543ab2165b786ac97f53b59aafb803fef0744"}, + {file = "jiter-0.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23ba7722d6748b6920ed02a8f1726fb4b33e0fd2f3f621816a8b486c66410ab2"}, + {file = "jiter-0.10.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:371eab43c0a288537d30e1f0b193bc4eca90439fc08a022dd83e5e07500ed026"}, + {file = "jiter-0.10.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c675736059020365cebc845a820214765162728b51ab1e03a1b7b3abb70f74c"}, + {file = "jiter-0.10.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0c5867d40ab716e4684858e4887489685968a47e3ba222e44cde6e4a2154f959"}, + {file = "jiter-0.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:395bb9a26111b60141757d874d27fdea01b17e8fac958b91c20128ba8f4acc8a"}, + {file = "jiter-0.10.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6842184aed5cdb07e0c7e20e5bdcfafe33515ee1741a6835353bb45fe5d1bd95"}, + {file = "jiter-0.10.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:62755d1bcea9876770d4df713d82606c8c1a3dca88ff39046b85a048566d56ea"}, + {file = "jiter-0.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:533efbce2cacec78d5ba73a41756beff8431dfa1694b6346ce7af3a12c42202b"}, + {file = "jiter-0.10.0-cp312-cp312-win32.whl", hash = "sha256:8be921f0cadd245e981b964dfbcd6fd4bc4e254cdc069490416dd7a2632ecc01"}, + {file = 
"jiter-0.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:a7c7d785ae9dda68c2678532a5a1581347e9c15362ae9f6e68f3fdbfb64f2e49"}, + {file = "jiter-0.10.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e0588107ec8e11b6f5ef0e0d656fb2803ac6cf94a96b2b9fc675c0e3ab5e8644"}, + {file = "jiter-0.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cafc4628b616dc32530c20ee53d71589816cf385dd9449633e910d596b1f5c8a"}, + {file = "jiter-0.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:520ef6d981172693786a49ff5b09eda72a42e539f14788124a07530f785c3ad6"}, + {file = "jiter-0.10.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:554dedfd05937f8fc45d17ebdf298fe7e0c77458232bcb73d9fbbf4c6455f5b3"}, + {file = "jiter-0.10.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5bc299da7789deacf95f64052d97f75c16d4fc8c4c214a22bf8d859a4288a1c2"}, + {file = "jiter-0.10.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5161e201172de298a8a1baad95eb85db4fb90e902353b1f6a41d64ea64644e25"}, + {file = "jiter-0.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e2227db6ba93cb3e2bf67c87e594adde0609f146344e8207e8730364db27041"}, + {file = "jiter-0.10.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15acb267ea5e2c64515574b06a8bf393fbfee6a50eb1673614aa45f4613c0cca"}, + {file = "jiter-0.10.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:901b92f2e2947dc6dfcb52fd624453862e16665ea909a08398dde19c0731b7f4"}, + {file = "jiter-0.10.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d0cb9a125d5a3ec971a094a845eadde2db0de85b33c9f13eb94a0c63d463879e"}, + {file = "jiter-0.10.0-cp313-cp313-win32.whl", hash = "sha256:48a403277ad1ee208fb930bdf91745e4d2d6e47253eedc96e2559d1e6527006d"}, + {file = "jiter-0.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:75f9eb72ecb640619c29bf714e78c9c46c9c4eaafd644bf78577ede459f330d4"}, + {file = 
"jiter-0.10.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:28ed2a4c05a1f32ef0e1d24c2611330219fed727dae01789f4a335617634b1ca"}, + {file = "jiter-0.10.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14a4c418b1ec86a195f1ca69da8b23e8926c752b685af665ce30777233dfe070"}, + {file = "jiter-0.10.0-cp313-cp313t-win_amd64.whl", hash = "sha256:d7bfed2fe1fe0e4dda6ef682cee888ba444b21e7a6553e03252e4feb6cf0adca"}, + {file = "jiter-0.10.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:5e9251a5e83fab8d87799d3e1a46cb4b7f2919b895c6f4483629ed2446f66522"}, + {file = "jiter-0.10.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:023aa0204126fe5b87ccbcd75c8a0d0261b9abdbbf46d55e7ae9f8e22424eeb8"}, + {file = "jiter-0.10.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c189c4f1779c05f75fc17c0c1267594ed918996a231593a21a5ca5438445216"}, + {file = "jiter-0.10.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:15720084d90d1098ca0229352607cd68256c76991f6b374af96f36920eae13c4"}, + {file = "jiter-0.10.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4f2fb68e5f1cfee30e2b2a09549a00683e0fde4c6a2ab88c94072fc33cb7426"}, + {file = "jiter-0.10.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ce541693355fc6da424c08b7edf39a2895f58d6ea17d92cc2b168d20907dee12"}, + {file = "jiter-0.10.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31c50c40272e189d50006ad5c73883caabb73d4e9748a688b216e85a9a9ca3b9"}, + {file = "jiter-0.10.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fa3402a2ff9815960e0372a47b75c76979d74402448509ccd49a275fa983ef8a"}, + {file = "jiter-0.10.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:1956f934dca32d7bb647ea21d06d93ca40868b505c228556d3373cbd255ce853"}, + {file = "jiter-0.10.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = 
"sha256:fcedb049bdfc555e261d6f65a6abe1d5ad68825b7202ccb9692636c70fcced86"}, + {file = "jiter-0.10.0-cp314-cp314-win32.whl", hash = "sha256:ac509f7eccca54b2a29daeb516fb95b6f0bd0d0d8084efaf8ed5dfc7b9f0b357"}, + {file = "jiter-0.10.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:5ed975b83a2b8639356151cef5c0d597c68376fc4922b45d0eb384ac058cfa00"}, + {file = "jiter-0.10.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3aa96f2abba33dc77f79b4cf791840230375f9534e5fac927ccceb58c5e604a5"}, + {file = "jiter-0.10.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bd6292a43c0fc09ce7c154ec0fa646a536b877d1e8f2f96c19707f65355b5a4d"}, + {file = "jiter-0.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:39de429dcaeb6808d75ffe9effefe96a4903c6a4b376b2f6d08d77c1aaee2f18"}, + {file = "jiter-0.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52ce124f13a7a616fad3bb723f2bfb537d78239d1f7f219566dc52b6f2a9e48d"}, + {file = "jiter-0.10.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:166f3606f11920f9a1746b2eea84fa2c0a5d50fd313c38bdea4edc072000b0af"}, + {file = "jiter-0.10.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:28dcecbb4ba402916034fc14eba7709f250c4d24b0c43fc94d187ee0580af181"}, + {file = "jiter-0.10.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86c5aa6910f9bebcc7bc4f8bc461aff68504388b43bfe5e5c0bd21efa33b52f4"}, + {file = "jiter-0.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ceeb52d242b315d7f1f74b441b6a167f78cea801ad7c11c36da77ff2d42e8a28"}, + {file = "jiter-0.10.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ff76d8887c8c8ee1e772274fcf8cc1071c2c58590d13e33bd12d02dc9a560397"}, + {file = "jiter-0.10.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a9be4d0fa2b79f7222a88aa488bd89e2ae0a0a5b189462a12def6ece2faa45f1"}, + {file = "jiter-0.10.0-cp39-cp39-musllinux_1_1_x86_64.whl", 
hash = "sha256:9ab7fd8738094139b6c1ab1822d6f2000ebe41515c537235fd45dabe13ec9324"}, + {file = "jiter-0.10.0-cp39-cp39-win32.whl", hash = "sha256:5f51e048540dd27f204ff4a87f5d79294ea0aa3aa552aca34934588cf27023cf"}, + {file = "jiter-0.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:1b28302349dc65703a9e4ead16f163b1c339efffbe1049c30a44b001a2a4fff9"}, + {file = "jiter-0.10.0.tar.gz", hash = "sha256:07a7142c38aacc85194391108dc91b5b57093c978a9932bd86a36862759d9500"}, +] + [[package]] name = "mccabe" version = "0.7.0" @@ -745,7 +888,6 @@ files = [ [package.dependencies] pytest = ">=8.2,<9" -typing-extensions = {version = ">=4.12", markers = "python_version < \"3.10\""} [package.extras] docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1)"] @@ -928,5 +1070,5 @@ typing-extensions = ">=4.12.0" [metadata] lock-version = "2.1" -python-versions = "^3.9" -content-hash = "302277e68bad670e2773fcb6b7f286cd8b30d9faf9fbb407f36f2c9234318bf5" +python-versions = "^3.10" +content-hash = "69b93d764fbe3249dcbac0abbf4e0d32b02025575c998afe0c891a44fbf46096" diff --git a/pyproject.toml b/pyproject.toml index 8dd0f363..9a6a01c5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,7 +15,6 @@ classifiers = [ "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", @@ -26,7 +25,7 @@ classifiers = [ packages = [{include = "src"}] [tool.poetry.dependencies] -python = "^3.9" +python = "^3.10" python-telegram-bot = "^22.1" structlog = "^25.4.0" pydantic = "^2.11.5" @@ -34,6 +33,8 @@ pydantic-settings = "^2.9.1" python-dotenv = "^1.0.0" aiofiles = "^24.1.0" aiosqlite = "^0.21.0" +anthropic = "^0.40.0" +claude-code-sdk = "^0.0.11" [tool.poetry.scripts] claude-telegram-bot = "src.main:run" @@ -54,7 +55,7 @@ mypy = "^1.16.0" [tool.black] line-length = 88 -target-version = ['py39'] 
+target-version = ['py310'] [tool.isort] profile = "black" @@ -67,7 +68,7 @@ addopts = "-v --cov=src --cov-report=html --cov-report=term-missing" asyncio_mode = "auto" [tool.mypy] -python_version = "3.9" +python_version = "3.10" warn_return_any = true warn_unused_configs = true disallow_untyped_defs = true diff --git a/src/bot/core.py b/src/bot/core.py index 6d4f8494..08916f67 100644 --- a/src/bot/core.py +++ b/src/bot/core.py @@ -23,6 +23,7 @@ from ..config.settings import Settings from ..exceptions import ClaudeCodeTelegramError +from .features.registry import FeatureRegistry logger = structlog.get_logger() @@ -36,6 +37,7 @@ def __init__(self, settings: Settings, dependencies: Dict[str, Any]): self.deps = dependencies self.app: Optional[Application] = None self.is_running = False + self.feature_registry: Optional[FeatureRegistry] = None async def initialize(self) -> None: """Initialize bot application.""" @@ -53,6 +55,16 @@ async def initialize(self) -> None: self.app = builder.build() + # Initialize feature registry + self.feature_registry = FeatureRegistry( + config=self.settings, + storage=self.deps.get("storage"), + security=self.deps.get("security"), + ) + + # Add feature registry to dependencies + self.deps["features"] = self.feature_registry + # Set bot commands for menu await self._set_bot_commands() @@ -80,6 +92,8 @@ async def _set_bot_commands(self) -> None: BotCommand("projects", "Show all projects"), BotCommand("status", "Show session status"), BotCommand("export", "Export current session"), + BotCommand("actions", "Show quick actions"), + BotCommand("git", "Git repository commands"), ] await self.app.bot.set_my_commands(commands) @@ -102,6 +116,8 @@ def _register_handlers(self) -> None: ("projects", command.show_projects), ("status", command.session_status), ("export", command.export_session), + ("actions", command.quick_actions), + ("git", command.git_command), ] for cmd, handler in handlers: @@ -257,6 +273,10 @@ async def stop(self) -> None: try: 
self.is_running = False # Stop the main loop first + # Shutdown feature registry + if self.feature_registry: + self.feature_registry.shutdown() + if self.app: # Stop the updater if it's running if self.app.updater.running: diff --git a/src/bot/features/__init__.py b/src/bot/features/__init__.py new file mode 100644 index 00000000..c05ab3a8 --- /dev/null +++ b/src/bot/features/__init__.py @@ -0,0 +1,12 @@ +"""Bot features package""" + +from .conversation_mode import ConversationContext, ConversationEnhancer +from .file_handler import CodebaseAnalysis, FileHandler, ProcessedFile + +__all__ = [ + "FileHandler", + "ProcessedFile", + "CodebaseAnalysis", + "ConversationEnhancer", + "ConversationContext", +] diff --git a/src/bot/features/conversation_mode.py b/src/bot/features/conversation_mode.py new file mode 100644 index 00000000..20953883 --- /dev/null +++ b/src/bot/features/conversation_mode.py @@ -0,0 +1,383 @@ +"""Enhanced conversation features. + +This module implements the Conversation Enhancement feature from TODO-7, providing: + +Features: +- Context preservation across conversation turns +- Intelligent follow-up suggestions based on tools used and content +- Code execution tracking and analysis +- Interactive conversation controls with inline keyboards +- Smart suggestion prioritization + +Core Components: +- ConversationContext: Tracks conversation state and metadata +- ConversationEnhancer: Main class for generating suggestions and formatting responses + +The implementation analyzes Claude's responses to generate contextually relevant +follow-up suggestions, making it easier for users to continue productive conversations +with actionable next steps. 
import hashlib
from dataclasses import dataclass, field
from typing import Dict, List, Optional

import structlog
from telegram import InlineKeyboardButton, InlineKeyboardMarkup

from ...claude.integration import ClaudeResponse

logger = structlog.get_logger()


@dataclass
class ConversationContext:
    """Context information for a conversation."""

    user_id: int
    session_id: Optional[str] = None
    project_path: Optional[str] = None
    last_tools_used: List[str] = field(default_factory=list)
    last_response_content: str = ""
    conversation_turn: int = 0
    has_errors: bool = False
    active_files: List[str] = field(default_factory=list)
    todo_count: int = 0

    def update_from_response(self, response: ClaudeResponse) -> None:
        """Update context from Claude response."""
        self.session_id = response.session_id
        self.last_response_content = response.content.lower()
        self.conversation_turn += 1
        self.has_errors = response.is_error or "error" in self.last_response_content

        # Extract tools used
        self.last_tools_used = [tool.get("name", "") for tool in response.tools_used]

        # Update active files if file tools were used
        if any(tool in self.last_tools_used for tool in ["Edit", "Write", "Read"]):
            # In a real implementation, we'd parse the tool outputs to get file names
            # For now, we'll track that file operations occurred
            pass

        # Count TODOs/FIXMEs in response
        todo_keywords = ["todo", "fixme", "note", "hack", "bug"]
        self.todo_count = sum(
            1 for keyword in todo_keywords if keyword in self.last_response_content
        )


class ConversationEnhancer:
    """Enhance conversation experience.

    Tracks per-user :class:`ConversationContext`, generates contextual
    follow-up suggestions from Claude responses, and builds inline
    keyboards for interactive conversation controls.
    """

    def __init__(self) -> None:
        """Initialize conversation enhancer."""
        self.conversation_contexts: Dict[int, ConversationContext] = {}

    def get_or_create_context(self, user_id: int) -> ConversationContext:
        """Get or create conversation context for user."""
        if user_id not in self.conversation_contexts:
            self.conversation_contexts[user_id] = ConversationContext(user_id=user_id)

        return self.conversation_contexts[user_id]

    def update_context(self, user_id: int, response: ClaudeResponse) -> None:
        """Update conversation context with response."""
        context = self.get_or_create_context(user_id)
        context.update_from_response(response)

        logger.debug(
            "Updated conversation context",
            user_id=user_id,
            session_id=context.session_id,
            turn=context.conversation_turn,
            tools_used=context.last_tools_used,
        )

    def generate_follow_up_suggestions(
        self, response: ClaudeResponse, context: ConversationContext
    ) -> List[str]:
        """Generate relevant follow-up suggestions.

        Suggestions are derived from (1) the tools Claude used, (2) keyword
        analysis of the response content, and (3) the conversation context,
        then deduplicated and prioritized (errors/fixes first, workflow
        second, enhancements last). At most 4 suggestions are returned.
        """
        suggestions = []

        # Based on tools used
        tools_used = [tool.get("name", "") for tool in response.tools_used]

        if "Write" in tools_used or "MultiEdit" in tools_used:
            suggestions.extend(
                [
                    "Add tests for the new code",
                    "Create documentation for this",
                    "Review the implementation",
                ]
            )

        if "Edit" in tools_used:
            suggestions.extend(
                [
                    "Review the changes made",
                    "Run tests to verify changes",
                    "Check for any side effects",
                ]
            )

        if "Read" in tools_used:
            suggestions.extend(
                [
                    "Explain how this code works",
                    "Suggest improvements",
                    "Add error handling",
                ]
            )

        if "Bash" in tools_used:
            suggestions.extend(
                [
                    "Explain the command output",
                    "Run additional related commands",
                    "Check for any issues",
                ]
            )

        if "Glob" in tools_used or "Grep" in tools_used:
            suggestions.extend(
                [
                    "Analyze the search results",
                    "Look into specific files found",
                    "Create a summary of findings",
                ]
            )

        # Based on response content analysis
        content_lower = response.content.lower()

        if "error" in content_lower or "failed" in content_lower:
            suggestions.extend(
                [
                    "Help me debug this error",
                    "Suggest alternative approaches",
                    "Check the logs for more details",
                ]
            )

        if "todo" in content_lower or "fixme" in content_lower:
            suggestions.extend(
                [
                    "Complete the TODO items",
                    "Prioritize the tasks",
                    "Create an action plan",
                ]
            )

        if "test" in content_lower and (
            "fail" in content_lower or "error" in content_lower
        ):
            suggestions.extend(
                [
                    "Fix the failing tests",
                    "Update test expectations",
                    "Add more test coverage",
                ]
            )

        if "install" in content_lower or "dependency" in content_lower:
            suggestions.extend(
                [
                    "Verify the installation",
                    "Check for version conflicts",
                    "Update package documentation",
                ]
            )

        if "git" in content_lower:
            suggestions.extend(
                [
                    "Review the git status",
                    "Check commit history",
                    "Create a commit with changes",
                ]
            )

        # Based on conversation context
        if context.conversation_turn > 1:
            suggestions.append("Continue with the next step")

        if context.has_errors:
            suggestions.extend(
                ["Investigate the error further", "Try a different approach"]
            )

        if context.todo_count > 0:
            suggestions.append("Address the TODO items")

        # General suggestions based on development patterns
        if any(keyword in content_lower for keyword in ["function", "class", "method"]):
            suggestions.extend(
                ["Add unit tests", "Improve documentation", "Add type hints"]
            )

        if "performance" in content_lower or "optimize" in content_lower:
            suggestions.extend(
                [
                    "Profile the performance",
                    "Benchmark the changes",
                    "Monitor resource usage",
                ]
            )

        # Remove duplicates (dict preserves first-seen order)
        unique_suggestions = list(dict.fromkeys(suggestions))

        # Prioritize based on tools used and content
        prioritized = []

        # High priority: error handling and fixes
        for suggestion in unique_suggestions:
            if any(
                keyword in suggestion.lower() for keyword in ["error", "debug", "fix"]
            ):
                prioritized.append(suggestion)

        # Medium priority: development workflow
        for suggestion in unique_suggestions:
            if suggestion not in prioritized and any(
                keyword in suggestion.lower()
                for keyword in ["test", "review", "verify"]
            ):
                prioritized.append(suggestion)

        # Lower priority: enhancements
        for suggestion in unique_suggestions:
            if suggestion not in prioritized:
                prioritized.append(suggestion)

        # Return top 3-4 most relevant suggestions
        return prioritized[:4]

    def create_follow_up_keyboard(self, suggestions: List[str]) -> InlineKeyboardMarkup:
        """Create keyboard with follow-up suggestions.

        Each suggestion button carries a short deterministic id in its
        callback data so the tapped suggestion can be resolved later.
        """
        if not suggestions:
            return InlineKeyboardMarkup([])

        keyboard = []

        # Add suggestion buttons (max 4, in rows of 1 for better mobile experience)
        for suggestion in suggestions[:4]:
            # FIX: built-in hash() is salted per process (PYTHONHASHSEED), so
            # `hash(suggestion)` cannot be re-derived after a restart and the
            # callback could never be matched back to its suggestion. Use a
            # deterministic digest instead; 8 hex chars keeps the callback
            # data well under Telegram's 64-byte limit.
            suggestion_hash = hashlib.md5(suggestion.encode("utf-8")).hexdigest()[:8]
            keyboard.append(
                [
                    InlineKeyboardButton(
                        f"💡 {suggestion}", callback_data=f"followup:{suggestion_hash}"
                    )
                ]
            )

        # Add control buttons
        keyboard.append(
            [
                InlineKeyboardButton(
                    "✅ Continue Coding", callback_data="conversation:continue"
                ),
                InlineKeyboardButton(
                    "🛑 End Session", callback_data="conversation:end"
                ),
            ]
        )

        return InlineKeyboardMarkup(keyboard)

    def should_show_suggestions(self, response: ClaudeResponse) -> bool:
        """Determine if follow-up suggestions should be shown."""
        # Don't show suggestions for errors
        if response.is_error:
            return False

        # Show suggestions if tools were used
        if response.tools_used:
            return True

        # Show suggestions for longer responses (likely more substantial)
        if len(response.content) > 200:
            return True

        # Show suggestions if response contains actionable content
        actionable_keywords = [
            "todo",
            "fixme",
            "next",
            "consider",
            "you can",
            "you could",
            "try",
            "test",
            "check",
            "verify",
            "review",
        ]

        content_lower = response.content.lower()
        return any(keyword in content_lower for keyword in actionable_keywords)

    def format_response_with_suggestions(
        self,
        response: ClaudeResponse,
        context: ConversationContext,
        max_content_length: int = 3000,
    ) -> tuple[str, Optional[InlineKeyboardMarkup]]:
        """Format response with follow-up suggestions.

        Returns the (possibly truncated) message text and an optional
        inline keyboard of follow-up suggestions.
        """
        # Truncate content if too long for Telegram
        content = response.content
        if len(content) > max_content_length:
            content = content[:max_content_length] + "\n\n... _(response truncated)_"

        # Add session info if this is a new session
        if context.conversation_turn == 1 and response.session_id:
            session_info = f"\n\n🆔 **Session:** `{response.session_id[:8]}...`"
            content += session_info

        # Add cost info if significant
        if response.cost > 0.01:
            cost_info = f"\n\n💰 **Cost:** ${response.cost:.4f}"
            content += cost_info

        # Generate follow-up suggestions
        keyboard = None
        if self.should_show_suggestions(response):
            suggestions = self.generate_follow_up_suggestions(response, context)
            if suggestions:
                keyboard = self.create_follow_up_keyboard(suggestions)
                logger.debug(
                    "Generated follow-up suggestions",
                    user_id=context.user_id,
                    suggestions=suggestions,
                )

        return content, keyboard

    def clear_context(self, user_id: int) -> None:
        """Clear conversation context for user."""
        if user_id in self.conversation_contexts:
            del self.conversation_contexts[user_id]
            logger.debug("Cleared conversation context", user_id=user_id)

    def get_context_summary(self, user_id: int) -> Optional[Dict]:
        """Get summary of conversation context."""
        context = self.conversation_contexts.get(user_id)
        if not context:
            return None

        return {
            "session_id": context.session_id,
            "project_path": context.project_path,
            "conversation_turn": context.conversation_turn,
            "last_tools_used": context.last_tools_used,
            "has_errors": context.has_errors,
            "todo_count": context.todo_count,
            "active_files_count": len(context.active_files),
        }
"""
Advanced file handling

Features:
- Multiple file processing
- Zip archive extraction
- Code analysis
- Diff generation
"""

import shutil
import tarfile
import uuid
import zipfile
from collections import defaultdict
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List

from telegram import Document

from src.config import Settings
from src.security.validators import SecurityValidator


@dataclass
class ProcessedFile:
    """Processed file result"""

    type: str
    prompt: str
    # FIX: was `Dict[str, any]` — lowercase `any` is the builtin function,
    # not a type; `typing.Any` is the intended annotation.
    metadata: Dict[str, Any]


@dataclass
class CodebaseAnalysis:
    """Codebase analysis result"""

    languages: Dict[str, int]
    frameworks: List[str]
    entry_points: List[str]
    todo_count: int
    test_coverage: bool
    file_stats: Dict[str, int]


class FileHandler:
    """Handle various file operations"""

    def __init__(self, config: Settings, security: SecurityValidator):
        self.config = config
        self.security = security
        self.temp_dir = Path("/tmp/claude_bot_files")
        self.temp_dir.mkdir(parents=True, exist_ok=True)

        # Supported code extensions
        self.code_extensions = {
            ".py",
            ".js",
            ".ts",
            ".jsx",
            ".tsx",
            ".java",
            ".cpp",
            ".c",
            ".h",
            ".go",
            ".rs",
            ".rb",
            ".php",
            ".swift",
            ".kt",
            ".scala",
            ".r",
            ".jl",
            ".lua",
            ".pl",
            ".sh",
            ".bash",
            ".zsh",
            ".fish",
            ".ps1",
            ".sql",
            ".html",
            ".css",
            ".scss",
            ".sass",
            ".less",
            ".vue",
            ".yaml",
            ".yml",
            ".json",
            ".xml",
            ".toml",
            ".ini",
            ".cfg",
            ".dockerfile",
            ".makefile",
            ".cmake",
            ".gradle",
            ".maven",
        }

        # Language mapping
        self.language_map = {
            ".py": "Python",
            ".js": "JavaScript",
            ".ts": "TypeScript",
            ".java": "Java",
            ".cpp": "C++",
            ".c": "C",
            ".go": "Go",
            ".rs": "Rust",
            ".rb": "Ruby",
            ".php": "PHP",
            ".swift": "Swift",
            ".kt": "Kotlin",
            ".scala": "Scala",
            ".r": "R",
            ".jl": "Julia",
            ".lua": "Lua",
            ".pl": "Perl",
            ".sh": "Shell",
            ".sql": "SQL",
            ".html": "HTML",
            ".css": "CSS",
            ".vue": "Vue",
            ".yaml": "YAML",
            ".json": "JSON",
            ".xml": "XML",
        }

    async def handle_document_upload(
        self, document: Document, user_id: int, context: str = ""
    ) -> ProcessedFile:
        """Process uploaded document.

        Downloads the Telegram document to a temp file, dispatches on the
        detected type (archive/code/text), and always cleans up the
        download afterwards.

        Raises:
            ValueError: if the file type is unsupported (e.g. binary).
        """

        # Download file
        file_path = await self._download_file(document)

        try:
            # Detect file type
            file_type = self._detect_file_type(file_path)

            # Process based on type
            if file_type == "archive":
                return await self._process_archive(file_path, context)
            elif file_type == "code":
                return await self._process_code_file(file_path, context)
            elif file_type == "text":
                return await self._process_text_file(file_path, context)
            else:
                raise ValueError(f"Unsupported file type: {file_type}")

        finally:
            # Cleanup
            file_path.unlink(missing_ok=True)

    async def _download_file(self, document: Document) -> Path:
        """Download file from Telegram"""
        # Get file
        file = await document.get_file()

        # Create temp file path
        file_name = document.file_name or f"file_{uuid.uuid4()}"
        file_path = self.temp_dir / file_name

        # Download to path
        await file.download_to_drive(str(file_path))

        return file_path

    def _detect_file_type(self, file_path: Path) -> str:
        """Detect file type based on extension and content"""
        ext = file_path.suffix.lower()

        # Check if archive
        if ext in {".zip", ".tar", ".gz", ".bz2", ".xz", ".7z"}:
            return "archive"

        # Check if code
        if ext in self.code_extensions:
            return "code"

        # Check if text: probe the first 1KB for valid UTF-8
        try:
            with open(file_path, "r", encoding="utf-8") as f:
                f.read(1024)  # Try reading first 1KB
            return "text"
        except (UnicodeDecodeError, IOError):
            return "binary"

    async def _process_archive(self, archive_path: Path, context: str) -> ProcessedFile:
        """Extract and analyze archive contents.

        Applies security checks (size limit, path-traversal prevention,
        regular-files-only for tar) before building an analysis prompt from
        the extracted file tree and a sample of code files.
        """

        # Create extraction directory
        extract_dir = self.temp_dir / f"extract_{uuid.uuid4()}"
        extract_dir.mkdir()

        try:
            # Extract based on type
            if archive_path.suffix == ".zip":
                with zipfile.ZipFile(archive_path) as zf:
                    # Security check - prevent zip bombs
                    total_size = sum(f.file_size for f in zf.filelist)
                    if total_size > 100 * 1024 * 1024:  # 100MB limit
                        raise ValueError("Archive too large")

                    # Extract with security checks
                    for file_info in zf.filelist:
                        # Prevent path traversal
                        file_path = Path(file_info.filename)
                        if file_path.is_absolute() or ".." in file_path.parts:
                            continue

                        # Extract file
                        target_path = extract_dir / file_path
                        target_path.parent.mkdir(parents=True, exist_ok=True)

                        with (
                            zf.open(file_info) as source,
                            open(target_path, "wb") as target,
                        ):
                            shutil.copyfileobj(source, target)

            elif archive_path.suffix in {".tar", ".gz", ".bz2", ".xz"}:
                with tarfile.open(archive_path) as tf:
                    # Security checks
                    total_size = sum(member.size for member in tf.getmembers())
                    if total_size > 100 * 1024 * 1024:  # 100MB limit
                        raise ValueError("Archive too large")

                    # Extract with security checks
                    for member in tf.getmembers():
                        # Prevent path traversal
                        if member.name.startswith("/") or ".." in member.name:
                            continue

                        # FIX: only extract regular files and directories —
                        # symlinks/hardlinks/devices from an untrusted tar
                        # can escape the extraction dir or create dangerous
                        # filesystem entries.
                        if not (member.isfile() or member.isdir()):
                            continue

                        tf.extract(member, extract_dir)

            # Analyze contents
            file_tree = self._build_file_tree(extract_dir)
            code_files = self._find_code_files(extract_dir)

            # Create analysis prompt
            prompt = f"{context}\n\nProject structure:\n{file_tree}\n\n"

            # Add key files
            for file_path in code_files[:5]:  # Limit to 5 files
                content = file_path.read_text(encoding="utf-8", errors="ignore")
                prompt += f"\nFile: {file_path.relative_to(extract_dir)}\n```\n{content[:1000]}...\n```\n"

            return ProcessedFile(
                type="archive",
                prompt=prompt,
                metadata={
                    "file_count": len(list(extract_dir.rglob("*"))),
                    "code_files": len(code_files),
                },
            )

        finally:
            # Cleanup
            shutil.rmtree(extract_dir, ignore_errors=True)

    async def _process_code_file(self, file_path: Path, context: str) -> ProcessedFile:
        """Process single code file"""
        content = file_path.read_text(encoding="utf-8", errors="ignore")

        # Detect language
        language = self._detect_language(file_path.suffix)

        # Create prompt
        prompt = f"{context}\n\nFile: {file_path.name}\nLanguage: {language}\n\n```{language.lower()}\n{content}\n```"

        return ProcessedFile(
            type="code",
            prompt=prompt,
            metadata={
                "language": language,
                "lines": len(content.splitlines()),
                "size": file_path.stat().st_size,
            },
        )

    async def _process_text_file(self, file_path: Path, context: str) -> ProcessedFile:
        """Process text file"""
        content = file_path.read_text(encoding="utf-8", errors="ignore")

        # Create prompt
        prompt = f"{context}\n\nFile: {file_path.name}\n\n{content}"

        return ProcessedFile(
            type="text",
            prompt=prompt,
            metadata={
                "lines": len(content.splitlines()),
                "size": file_path.stat().st_size,
            },
        )

    def _build_file_tree(self, directory: Path, prefix: str = "") -> str:
        """Build visual file tree (directories first? no: files sorted last)."""
        # Sort: directories before files, then by name.
        items = sorted(directory.iterdir(), key=lambda x: (x.is_file(), x.name))
        tree_lines = []

        for i, item in enumerate(items):
            is_last = i == len(items) - 1
            current_prefix = "└── " if is_last else "├── "

            if item.is_dir():
                tree_lines.append(f"{prefix}{current_prefix}{item.name}/")
                # Recursive call with updated prefix
                sub_prefix = prefix + ("    " if is_last else "│   ")
                tree_lines.append(self._build_file_tree(item, sub_prefix))
            else:
                size = item.stat().st_size
                tree_lines.append(
                    f"{prefix}{current_prefix}{item.name} ({self._format_size(size)})"
                )

        return "\n".join(filter(None, tree_lines))

    def _format_size(self, size: int) -> str:
        """Format file size for display"""
        for unit in ["B", "KB", "MB", "GB"]:
            if size < 1024.0:
                return f"{size:.1f}{unit}"
            size /= 1024.0
        return f"{size:.1f}TB"

    def _find_code_files(self, directory: Path) -> List[Path]:
        """Find all code files in directory"""
        code_files = []

        for file_path in directory.rglob("*"):
            if file_path.is_file() and file_path.suffix.lower() in self.code_extensions:
                # Skip common non-code directories
                if any(
                    part in file_path.parts
                    for part in ["node_modules", "__pycache__", ".git", "dist", "build"]
                ):
                    continue
                code_files.append(file_path)

        # Sort by importance (main files first, then by name)
        def sort_key(path: Path) -> tuple:
            name = path.name.lower()
            # Prioritize main/index files
            if name in [
                "main.py",
                "index.js",
                "app.py",
                "server.py",
                "main.go",
                "main.rs",
            ]:
                return (0, name)
            elif name.startswith("index."):
                return (1, name)
            elif name.startswith("main."):
                return (2, name)
            else:
                return (3, name)

        code_files.sort(key=sort_key)
        return code_files

    def _detect_language(self, extension: str) -> str:
        """Detect programming language from extension"""
        return self.language_map.get(extension.lower(), "text")
+ + # Language detection + language_stats = defaultdict(int) + file_extensions = defaultdict(int) + + for file_path in directory.rglob("*"): + if file_path.is_file(): + ext = file_path.suffix.lower() + file_extensions[ext] += 1 + + language = self._detect_language(ext) + if language and language != "text": + language_stats[language] += 1 + + analysis.languages = dict(language_stats) + analysis.file_stats = dict(file_extensions) + + # Find entry points + analysis.entry_points = self._find_entry_points(directory) + + # Detect frameworks + analysis.frameworks = self._detect_frameworks(directory) + + # Find TODOs and FIXMEs + analysis.todo_count = await self._find_todos(directory) + + # Check for tests + test_files = self._find_test_files(directory) + analysis.test_coverage = len(test_files) > 0 + + return analysis + + def _find_entry_points(self, directory: Path) -> List[str]: + """Find likely entry points in the codebase""" + entry_points = [] + + # Common entry point patterns + patterns = [ + "main.py", + "app.py", + "server.py", + "__main__.py", + "index.js", + "app.js", + "server.js", + "main.js", + "main.go", + "main.rs", + "main.cpp", + "main.c", + "Main.java", + "App.java", + "index.php", + "index.html", + ] + + for pattern in patterns: + for file_path in directory.rglob(pattern): + if file_path.is_file(): + entry_points.append(str(file_path.relative_to(directory))) + + return entry_points + + def _detect_frameworks(self, directory: Path) -> List[str]: + """Detect frameworks and libraries used""" + frameworks = [] + + # Framework indicators + indicators = { + "package.json": ["React", "Vue", "Angular", "Express", "Next.js"], + "requirements.txt": ["Django", "Flask", "FastAPI", "PyTorch", "TensorFlow"], + "Cargo.toml": ["Tokio", "Actix", "Rocket"], + "go.mod": ["Gin", "Echo", "Fiber"], + "pom.xml": ["Spring", "Maven"], + "build.gradle": ["Spring", "Gradle"], + "composer.json": ["Laravel", "Symfony"], + "Gemfile": ["Rails", "Sinatra"], + } + + for indicator_file, 
possible_frameworks in indicators.items(): + file_path = directory / indicator_file + if file_path.exists(): + content = file_path.read_text(encoding="utf-8", errors="ignore").lower() + for framework in possible_frameworks: + if framework.lower() in content: + frameworks.append(framework) + + # Check for specific framework files + if (directory / "manage.py").exists(): + frameworks.append("Django") + if (directory / "artisan").exists(): + frameworks.append("Laravel") + if (directory / "next.config.js").exists(): + frameworks.append("Next.js") + + return list(set(frameworks)) # Remove duplicates + + async def _find_todos(self, directory: Path) -> int: + """Count TODO and FIXME comments""" + todo_count = 0 + + for file_path in directory.rglob("*"): + if file_path.is_file() and file_path.suffix.lower() in self.code_extensions: + try: + content = file_path.read_text(encoding="utf-8", errors="ignore") + # Count TODOs and FIXMEs + todo_count += content.upper().count("TODO") + todo_count += content.upper().count("FIXME") + except Exception: + continue + + return todo_count + + def _find_test_files(self, directory: Path) -> List[Path]: + """Find test files in the codebase""" + test_files = [] + + # Common test patterns + test_patterns = [ + "test_*.py", + "*_test.py", + "*_test.go", + "*.test.js", + "*.spec.js", + "*.test.ts", + "*.spec.ts", + ] + + for pattern in test_patterns: + test_files.extend(directory.rglob(pattern)) + + # Check test directories + for test_dir_name in ["test", "tests", "__tests__", "spec"]: + test_dir = directory / test_dir_name + if test_dir.exists() and test_dir.is_dir(): + test_files.extend(test_dir.rglob("*")) + + return [f for f in test_files if f.is_file()] diff --git a/src/bot/features/git_integration.py b/src/bot/features/git_integration.py new file mode 100644 index 00000000..ba944181 --- /dev/null +++ b/src/bot/features/git_integration.py @@ -0,0 +1,420 @@ +"""Git integration for safe repository operations.""" + +import asyncio +import 
logging +import re +from dataclasses import dataclass +from datetime import datetime +from pathlib import Path +from typing import List, Optional, Set, Tuple + +from src.config.settings import Settings +from src.exceptions import SecurityError + +logger = logging.getLogger(__name__) + + +class GitError(Exception): + """Git operation error.""" + + pass + + +@dataclass +class GitStatus: + """Git repository status.""" + + branch: str + modified: List[str] + added: List[str] + deleted: List[str] + untracked: List[str] + ahead: int + behind: int + + @property + def is_clean(self) -> bool: + """Check if working directory is clean.""" + return not any([self.modified, self.added, self.deleted, self.untracked]) + + +@dataclass +class CommitInfo: + """Git commit information.""" + + hash: str + author: str + date: datetime + message: str + files_changed: int + insertions: int + deletions: int + + +class GitIntegration: + """Safe git integration for repositories.""" + + # Safe git commands allowed + SAFE_COMMANDS: Set[str] = { + "status", + "log", + "diff", + "branch", + "remote", + "show", + "ls-files", + "ls-tree", + "rev-parse", + "describe", + } + + # Dangerous patterns to block + DANGEROUS_PATTERNS = [ + r"--exec", + r"--upload-pack", + r"--receive-pack", + r"-c\s*core\.gitProxy", + r"-c\s*core\.sshCommand", + ] + + def __init__(self, settings: Settings): + """Initialize git integration. + + Args: + settings: Application settings + """ + self.settings = settings + self.approved_dir = Path(settings.approved_directory) + + async def execute_git_command( + self, command: List[str], cwd: Path + ) -> Tuple[str, str]: + """Execute safe git command. 
+ + Args: + command: Git command parts + cwd: Working directory + + Returns: + Tuple of (stdout, stderr) + + Raises: + SecurityError: If command is unsafe + GitError: If git command fails + """ + # Validate command safety + if not command or command[0] != "git": + raise SecurityError("Only git commands allowed") + + if len(command) < 2 or command[1] not in self.SAFE_COMMANDS: + raise SecurityError(f"Unsafe git command: {command[1]}") + + # Check for dangerous patterns + cmd_str = " ".join(command) + for pattern in self.DANGEROUS_PATTERNS: + if re.search(pattern, cmd_str, re.IGNORECASE): + raise SecurityError(f"Dangerous pattern detected: {pattern}") + + # Validate working directory + try: + cwd = cwd.resolve() + if not cwd.is_relative_to(self.approved_dir): + raise SecurityError("Repository outside approved directory") + except Exception: + raise SecurityError("Invalid repository path") + + # Execute command + try: + process = await asyncio.create_subprocess_exec( + *command, + cwd=cwd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + + stdout, stderr = await process.communicate() + + if process.returncode != 0: + raise GitError(f"Git command failed: {stderr.decode()}") + + return stdout.decode(), stderr.decode() + + except asyncio.TimeoutError: + raise GitError("Git command timed out") + except Exception as e: + logger.error(f"Git command error: {e}") + raise GitError(f"Failed to execute git command: {e}") + + async def get_status(self, repo_path: Path) -> GitStatus: + """Get repository status. 
+ + Args: + repo_path: Repository path + + Returns: + Git status information + """ + # Get branch and tracking info + branch_out, _ = await self.execute_git_command( + ["git", "branch", "--show-current"], repo_path + ) + branch = branch_out.strip() or "HEAD" + + # Get file status + status_out, _ = await self.execute_git_command( + ["git", "status", "--porcelain=v1"], repo_path + ) + + modified = [] + added = [] + deleted = [] + untracked = [] + + for line in status_out.strip().split("\n"): + if not line: + continue + + status = line[:2] + filename = line[3:] + + if status == "??": + untracked.append(filename) + elif "M" in status: + modified.append(filename) + elif "A" in status: + added.append(filename) + elif "D" in status: + deleted.append(filename) + + # Get ahead/behind counts + ahead = behind = 0 + try: + # Try to get upstream tracking info + rev_out, _ = await self.execute_git_command( + ["git", "rev-list", "--count", "--left-right", "HEAD...@{upstream}"], + repo_path, + ) + if rev_out.strip(): + parts = rev_out.strip().split("\t") + if len(parts) == 2: + ahead = int(parts[0]) + behind = int(parts[1]) + except GitError: + # No upstream configured + pass + + return GitStatus( + branch=branch, + modified=modified, + added=added, + deleted=deleted, + untracked=untracked, + ahead=ahead, + behind=behind, + ) + + async def get_diff( + self, repo_path: Path, staged: bool = False, file_path: Optional[str] = None + ) -> str: + """Get repository diff. 
+ + Args: + repo_path: Repository path + staged: Show staged changes + file_path: Specific file to diff + + Returns: + Formatted diff output + """ + command = ["git", "diff"] + + if staged: + command.append("--staged") + + # Add formatting options + command.extend(["--no-color", "--minimal"]) + + if file_path: + # Validate file path + file_path_obj = (repo_path / file_path).resolve() + if not file_path_obj.is_relative_to(repo_path): + raise SecurityError("File path outside repository") + command.append(file_path) + + diff_out, _ = await self.execute_git_command(command, repo_path) + + if not diff_out.strip(): + return "No changes to show" + + # Format diff with indicators + lines = [] + for line in diff_out.split("\n"): + if line.startswith("+") and not line.startswith("+++"): + lines.append(f"➕ {line[1:]}") + elif line.startswith("-") and not line.startswith("---"): + lines.append(f"➖ {line[1:]}") + elif line.startswith("@@"): + lines.append(f"📍 {line}") + else: + lines.append(line) + + return "\n".join(lines) + + async def get_file_history( + self, repo_path: Path, file_path: str, limit: int = 10 + ) -> List[CommitInfo]: + """Get file commit history. 
+ + Args: + repo_path: Repository path + file_path: File to get history for + limit: Maximum commits to return + + Returns: + List of commit information + """ + # Validate file path + file_path_obj = (repo_path / file_path).resolve() + if not file_path_obj.is_relative_to(repo_path): + raise SecurityError("File path outside repository") + + # Get commit log with stats + log_out, _ = await self.execute_git_command( + [ + "git", + "log", + f"--max-count={limit}", + "--pretty=format:%H|%an|%aI|%s", + "--numstat", + "--", + file_path, + ], + repo_path, + ) + + commits = [] + current_commit = None + + for line in log_out.strip().split("\n"): + if not line: + continue + + if "|" in line and len(line.split("|")) == 4: + # Commit info line + parts = line.split("|") + + if current_commit: + commits.append(current_commit) + + current_commit = CommitInfo( + hash=parts[0][:8], # Short hash + author=parts[1], + date=datetime.fromisoformat(parts[2].replace("Z", "+00:00")), + message=parts[3], + files_changed=0, + insertions=0, + deletions=0, + ) + elif current_commit and "\t" in line: + # Numstat line + parts = line.split("\t") + if len(parts) == 3: + try: + insertions = int(parts[0]) if parts[0] != "-" else 0 + deletions = int(parts[1]) if parts[1] != "-" else 0 + current_commit.insertions += insertions + current_commit.deletions += deletions + current_commit.files_changed += 1 + except ValueError: + pass + + if current_commit: + commits.append(current_commit) + + return commits + + def format_status(self, status: GitStatus) -> str: + """Format git status for display. 
+ + Args: + status: Git status object + + Returns: + Formatted status string + """ + lines = [f"🌿 Branch: {status.branch}"] + + # Add tracking info + if status.ahead or status.behind: + tracking = [] + if status.ahead: + tracking.append(f"↑{status.ahead}") + if status.behind: + tracking.append(f"↓{status.behind}") + lines.append(f"📊 Tracking: {' '.join(tracking)}") + + if status.is_clean: + lines.append("✅ Working tree clean") + else: + if status.modified: + lines.append(f"📝 Modified: {len(status.modified)} files") + for f in status.modified[:5]: # Show first 5 + lines.append(f" • {f}") + if len(status.modified) > 5: + lines.append(f" ... and {len(status.modified) - 5} more") + + if status.added: + lines.append(f"➕ Added: {len(status.added)} files") + for f in status.added[:5]: + lines.append(f" • {f}") + if len(status.added) > 5: + lines.append(f" ... and {len(status.added) - 5} more") + + if status.deleted: + lines.append(f"➖ Deleted: {len(status.deleted)} files") + for f in status.deleted[:5]: + lines.append(f" • {f}") + if len(status.deleted) > 5: + lines.append(f" ... and {len(status.deleted) - 5} more") + + if status.untracked: + lines.append(f"❓ Untracked: {len(status.untracked)} files") + for f in status.untracked[:5]: + lines.append(f" • {f}") + if len(status.untracked) > 5: + lines.append(f" ... and {len(status.untracked) - 5} more") + + return "\n".join(lines) + + def format_history(self, commits: List[CommitInfo]) -> str: + """Format commit history for display. 
+ + Args: + commits: List of commits + + Returns: + Formatted history string + """ + if not commits: + return "No commit history found" + + lines = ["📜 Commit History:"] + + for commit in commits: + lines.append( + f"\n🔹 {commit.hash} - {commit.date.strftime('%Y-%m-%d %H:%M')}" + ) + lines.append(f" 👤 {commit.author}") + lines.append(f" 💬 {commit.message}") + + if commit.files_changed: + stats = [] + if commit.insertions: + stats.append(f"+{commit.insertions}") + if commit.deletions: + stats.append(f"-{commit.deletions}") + lines.append( + f" 📊 {commit.files_changed} files changed, {' '.join(stats)}" + ) + + return "\n".join(lines) diff --git a/src/bot/features/image_handler.py b/src/bot/features/image_handler.py new file mode 100644 index 00000000..f9cf3798 --- /dev/null +++ b/src/bot/features/image_handler.py @@ -0,0 +1,181 @@ +""" +Handle image uploads for UI/screenshot analysis + +Features: +- OCR for text extraction +- UI element detection +- Image description +- Diagram analysis +""" + +import base64 +from dataclasses import dataclass +from typing import Dict, Optional + +from telegram import PhotoSize + +from src.config import Settings + + +@dataclass +class ProcessedImage: + """Processed image result""" + + prompt: str + image_type: str + base64_data: str + size: int + metadata: Dict[str, any] = None + + +class ImageHandler: + """Process image uploads""" + + def __init__(self, config: Settings): + self.config = config + self.supported_formats = {".png", ".jpg", ".jpeg", ".gif", ".webp"} + + async def process_image( + self, photo: PhotoSize, caption: Optional[str] = None + ) -> ProcessedImage: + """Process uploaded image""" + + # Download image + file = await photo.get_file() + image_bytes = await file.download_as_bytearray() + + # Detect image type + image_type = self._detect_image_type(image_bytes) + + # Create appropriate prompt + if image_type == "screenshot": + prompt = self._create_screenshot_prompt(caption) + elif image_type == "diagram": + prompt = 
self._create_diagram_prompt(caption) + elif image_type == "ui_mockup": + prompt = self._create_ui_prompt(caption) + else: + prompt = self._create_generic_prompt(caption) + + # Convert to base64 for Claude (if supported in future) + base64_image = base64.b64encode(image_bytes).decode("utf-8") + + return ProcessedImage( + prompt=prompt, + image_type=image_type, + base64_data=base64_image, + size=len(image_bytes), + metadata={ + "format": self._detect_format(image_bytes), + "has_caption": caption is not None, + }, + ) + + def _detect_image_type(self, image_bytes: bytes) -> str: + """Detect type of image""" + # Simple heuristic based on image characteristics + # In practice, could use ML model for better detection + + # For now, return generic type + return "screenshot" + + def _detect_format(self, image_bytes: bytes) -> str: + """Detect image format from magic bytes""" + # Check magic bytes for common formats + if image_bytes.startswith(b"\x89PNG"): + return "png" + elif image_bytes.startswith(b"\xff\xd8\xff"): + return "jpeg" + elif image_bytes.startswith(b"GIF87a") or image_bytes.startswith(b"GIF89a"): + return "gif" + elif image_bytes.startswith(b"RIFF") and b"WEBP" in image_bytes[:12]: + return "webp" + else: + return "unknown" + + def _create_screenshot_prompt(self, caption: Optional[str]) -> str: + """Create prompt for screenshot analysis""" + base_prompt = """I'm sharing a screenshot with you. Please analyze it and help me with: + +1. Identifying what application or website this is from +2. Understanding the UI elements and their purpose +3. Any issues or improvements you notice +4. Answering any specific questions I have + +""" + if caption: + base_prompt += f"Specific request: {caption}" + + return base_prompt + + def _create_diagram_prompt(self, caption: Optional[str]) -> str: + """Create prompt for diagram analysis""" + base_prompt = """I'm sharing a diagram with you. Please help me: + +1. Understand the components and their relationships +2. 
Identify the type of diagram (flowchart, architecture, etc.) +3. Explain any technical concepts shown +4. Suggest improvements or clarifications + +""" + if caption: + base_prompt += f"Specific request: {caption}" + + return base_prompt + + def _create_ui_prompt(self, caption: Optional[str]) -> str: + """Create prompt for UI mockup analysis""" + base_prompt = """I'm sharing a UI mockup with you. Please analyze: + +1. The layout and visual hierarchy +2. User experience considerations +3. Accessibility aspects +4. Implementation suggestions +5. Any potential improvements + +""" + if caption: + base_prompt += f"Specific request: {caption}" + + return base_prompt + + def _create_generic_prompt(self, caption: Optional[str]) -> str: + """Create generic image analysis prompt""" + base_prompt = """I'm sharing an image with you. Please analyze it and provide relevant insights. + +""" + if caption: + base_prompt += f"Context: {caption}" + + return base_prompt + + def supports_format(self, filename: str) -> bool: + """Check if image format is supported""" + if not filename: + return False + + # Extract extension + parts = filename.lower().split(".") + if len(parts) < 2: + return False + + extension = f".{parts[-1]}" + return extension in self.supported_formats + + async def validate_image(self, image_bytes: bytes) -> tuple[bool, Optional[str]]: + """Validate image data""" + # Check size + max_size = 10 * 1024 * 1024 # 10MB + if len(image_bytes) > max_size: + return False, "Image too large (max 10MB)" + + # Check format + format_type = self._detect_format(image_bytes) + if format_type == "unknown": + return False, "Unsupported image format" + + # Basic validity check + if len(image_bytes) < 100: # Too small to be a real image + return False, "Invalid image data" + + return True, None diff --git a/src/bot/features/quick_actions.py b/src/bot/features/quick_actions.py new file mode 100644 index 00000000..2da239af --- /dev/null +++ b/src/bot/features/quick_actions.py @@ -0,0 
"""Quick Actions feature implementation.

Provides context-aware quick action suggestions for common development tasks.
"""

import logging
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional

from telegram import InlineKeyboardButton, InlineKeyboardMarkup

from src.storage.models import SessionModel

logger = logging.getLogger(__name__)


@dataclass
class QuickAction:
    """One suggested development task the bot can run for the user."""

    id: str
    name: str
    description: str
    command: str
    icon: str
    category: str
    context_required: List[str]  # Context flags that must all be truthy
    priority: int = 0  # Higher values are suggested first


class QuickActionManager:
    """Builds, filters, and ranks quick-action suggestions."""

    def __init__(self) -> None:
        """Populate the default action catalogue and a class-scoped logger."""
        self.actions = self._create_default_actions()
        self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")

    def _create_default_actions(self) -> Dict[str, QuickAction]:
        """Create the built-in actions, keyed by id.

        Every default action's ``command`` equals its ``id``, so the spec
        table below carries the identifier only once.
        """
        # (id, name, description, icon, category, required context, priority)
        specs = [
            ("test", "Run Tests", "Run project tests",
             "🧪", "testing", ["has_tests"], 10),
            ("install", "Install Dependencies", "Install project dependencies",
             "📦", "setup", ["has_package_manager"], 9),
            ("format", "Format Code", "Format code with project formatter",
             "🎨", "quality", ["has_formatter"], 7),
            ("lint", "Lint Code", "Check code quality",
             "🔍", "quality", ["has_linter"], 8),
            ("security", "Security Scan", "Run security vulnerability scan",
             "🔒", "security", ["has_dependencies"], 6),
            ("optimize", "Optimize", "Optimize code performance",
             "⚡", "performance", ["has_code"], 5),
            ("document", "Generate Docs", "Generate documentation",
             "📝", "documentation", ["has_code"], 4),
            ("refactor", "Refactor", "Suggest code improvements",
             "🔧", "quality", ["has_code"], 3),
        ]
        return {
            action_id: QuickAction(
                id=action_id,
                name=name,
                description=description,
                command=action_id,
                icon=icon,
                category=category,
                context_required=required,
                priority=priority,
            )
            for action_id, name, description, icon, category, required, priority
            in specs
        }

    async def get_suggestions(
        self, session: SessionModel, limit: int = 6
    ) -> List[QuickAction]:
        """Get quick action suggestions based on session context.

        Args:
            session: Current session
            limit: Maximum number of suggestions

        Returns:
            Up to *limit* actions, highest priority first (empty on error).
        """
        try:
            context = await self._analyze_context(session)
            candidates = [
                action
                for action in self.actions.values()
                if self._is_action_available(action, context)
            ]
            candidates.sort(key=lambda action: action.priority, reverse=True)
            return candidates[:limit]
        except Exception as e:
            self.logger.error(f"Error getting suggestions: {e}")
            return []
+ + Args: + session: Current session + + Returns: + Context dictionary + """ + context = { + "has_code": True, # Default assumption + "has_tests": False, + "has_package_manager": False, + "has_formatter": False, + "has_linter": False, + "has_dependencies": False, + } + + # Analyze recent messages for context clues + if session.context: + recent_messages = session.context.get("recent_messages", []) + for msg in recent_messages: + content = msg.get("content", "").lower() + + # Check for test indicators + if any(word in content for word in ["test", "pytest", "unittest"]): + context["has_tests"] = True + + # Check for package manager indicators + if any(word in content for word in ["pip", "poetry", "npm", "yarn"]): + context["has_package_manager"] = True + context["has_dependencies"] = True + + # Check for formatter indicators + if any(word in content for word in ["black", "prettier", "format"]): + context["has_formatter"] = True + + # Check for linter indicators + if any( + word in content for word in ["flake8", "pylint", "eslint", "mypy"] + ): + context["has_linter"] = True + + # File-based context analysis could be added here + # For now, we'll use heuristics based on session history + + return context + + def _is_action_available( + self, action: QuickAction, context: Dict[str, Any] + ) -> bool: + """Check if an action is available in the given context. + + Args: + action: The action to check + context: Current context + + Returns: + True if action is available + """ + # Check all required context keys + for key in action.context_required: + if not context.get(key, False): + return False + return True + + def create_inline_keyboard( + self, actions: List[QuickAction], columns: int = 2 + ) -> InlineKeyboardMarkup: + """Create inline keyboard for quick actions. 
+ + Args: + actions: List of actions to display + columns: Number of columns in keyboard + + Returns: + Inline keyboard markup + """ + keyboard = [] + row = [] + + for i, action in enumerate(actions): + button = InlineKeyboardButton( + text=f"{action.icon} {action.name}", + callback_data=f"quick_action:{action.id}", + ) + row.append(button) + + # Add row when full or last item + if len(row) >= columns or i == len(actions) - 1: + keyboard.append(row) + row = [] + + return InlineKeyboardMarkup(keyboard) + + async def execute_action( + self, action_id: str, session: SessionModel, callback: Optional[Callable] = None + ) -> str: + """Execute a quick action. + + Args: + action_id: ID of action to execute + session: Current session + callback: Optional callback for command execution + + Returns: + Command to execute + """ + action = self.actions.get(action_id) + if not action: + raise ValueError(f"Unknown action: {action_id}") + + self.logger.info( + f"Executing quick action: {action.name} for session {session.id}" + ) + + # Return the command - actual execution is handled by the bot + return action.command diff --git a/src/bot/features/registry.py b/src/bot/features/registry.py new file mode 100644 index 00000000..c5cdbc6d --- /dev/null +++ b/src/bot/features/registry.py @@ -0,0 +1,141 @@ +""" +Central feature registry and management +""" + +from typing import Any, Dict, Optional + +import structlog + +from src.config.settings import Settings +from src.security.validators import SecurityValidator +from src.storage.facade import Storage + +from .conversation_mode import ConversationEnhancer +from .file_handler import FileHandler +from .git_integration import GitIntegration +from .image_handler import ImageHandler +from .quick_actions import QuickActionManager +from .session_export import SessionExporter + +logger = structlog.get_logger(__name__) + + +class FeatureRegistry: + """Manage all bot features""" + + def __init__(self, config: Settings, storage: Storage, security: 
SecurityValidator): + self.config = config + self.storage = storage + self.security = security + self.features: Dict[str, Any] = {} + + # Initialize features based on config + self._initialize_features() + + def _initialize_features(self): + """Initialize enabled features""" + logger.info("Initializing bot features") + + # File upload handling - conditionally enabled + if self.config.enable_file_uploads: + try: + self.features["file_handler"] = FileHandler( + config=self.config, security=self.security + ) + logger.info("File handler feature enabled") + except Exception as e: + logger.error("Failed to initialize file handler", error=str(e)) + + # Git integration - conditionally enabled + if self.config.enable_git_integration: + try: + self.features["git"] = GitIntegration( + config=self.config, security=self.security + ) + logger.info("Git integration feature enabled") + except Exception as e: + logger.error("Failed to initialize git integration", error=str(e)) + + # Quick actions - conditionally enabled + if self.config.enable_quick_actions: + try: + self.features["quick_actions"] = QuickActionManager() + logger.info("Quick actions feature enabled") + except Exception as e: + logger.error("Failed to initialize quick actions", error=str(e)) + + # Session export - always enabled + try: + self.features["session_export"] = SessionExporter(storage=self.storage) + logger.info("Session export feature enabled") + except Exception as e: + logger.error("Failed to initialize session export", error=str(e)) + + # Image handling - always enabled + try: + self.features["image_handler"] = ImageHandler(config=self.config) + logger.info("Image handler feature enabled") + except Exception as e: + logger.error("Failed to initialize image handler", error=str(e)) + + # Conversation enhancements - always enabled + try: + self.features["conversation"] = ConversationEnhancer() + logger.info("Conversation enhancer feature enabled") + except Exception as e: + logger.error("Failed to 
initialize conversation enhancer", error=str(e)) + + logger.info( + "Feature initialization complete", + enabled_features=list(self.features.keys()), + ) + + def get_feature(self, name: str) -> Optional[Any]: + """Get feature by name""" + return self.features.get(name) + + def is_enabled(self, feature_name: str) -> bool: + """Check if feature is enabled""" + return feature_name in self.features + + def get_file_handler(self) -> Optional[FileHandler]: + """Get file handler feature""" + return self.get_feature("file_handler") + + def get_git_integration(self) -> Optional[GitIntegration]: + """Get git integration feature""" + return self.get_feature("git") + + def get_quick_actions(self) -> Optional[QuickActionManager]: + """Get quick actions feature""" + return self.get_feature("quick_actions") + + def get_session_export(self) -> Optional[SessionExporter]: + """Get session export feature""" + return self.get_feature("session_export") + + def get_image_handler(self) -> Optional[ImageHandler]: + """Get image handler feature""" + return self.get_feature("image_handler") + + def get_conversation_enhancer(self) -> Optional[ConversationEnhancer]: + """Get conversation enhancer feature""" + return self.get_feature("conversation") + + def get_enabled_features(self) -> Dict[str, Any]: + """Get all enabled features""" + return self.features.copy() + + def shutdown(self): + """Shutdown all features""" + logger.info("Shutting down features") + + # Clear conversation contexts + conversation = self.get_conversation_enhancer() + if conversation: + conversation.conversation_contexts.clear() + + # Clear feature registry + self.features.clear() + + logger.info("Feature shutdown complete") diff --git a/src/bot/features/session_export.py b/src/bot/features/session_export.py new file mode 100644 index 00000000..6729636c --- /dev/null +++ b/src/bot/features/session_export.py @@ -0,0 +1,302 @@ +"""Session export functionality for exporting chat history in various formats.""" + +import json 
+from dataclasses import dataclass +from datetime import datetime +from enum import Enum +from typing import List + +from src.storage.facade import Storage +from src.utils.constants import MAX_SESSION_LENGTH + + +class ExportFormat(Enum): + """Supported export formats.""" + + MARKDOWN = "markdown" + JSON = "json" + HTML = "html" + + +@dataclass +class ExportedSession: + """Exported session data.""" + + format: ExportFormat + content: str + filename: str + mime_type: str + size_bytes: int + created_at: datetime + + +class SessionExporter: + """Handles exporting chat sessions in various formats.""" + + def __init__(self, storage: Storage): + """Initialize exporter with storage dependency. + + Args: + storage: Storage facade for session data access + """ + self.storage = storage + + async def export_session( + self, + user_id: int, + session_id: str, + format: ExportFormat = ExportFormat.MARKDOWN, + ) -> ExportedSession: + """Export a session in the specified format. + + Args: + user_id: User ID + session_id: Session ID to export + format: Export format (markdown, json, html) + + Returns: + ExportedSession with exported content + + Raises: + ValueError: If session not found or invalid format + """ + # Get session data + session = await self.storage.get_session(user_id, session_id) + if not session: + raise ValueError(f"Session {session_id} not found") + + # Get session messages + messages = await self.storage.get_session_messages( + session_id, limit=MAX_SESSION_LENGTH + ) + + # Export based on format + if format == ExportFormat.MARKDOWN: + content = await self._export_markdown(session, messages) + mime_type = "text/markdown" + extension = "md" + elif format == ExportFormat.JSON: + content = await self._export_json(session, messages) + mime_type = "application/json" + extension = "json" + elif format == ExportFormat.HTML: + content = await self._export_html(session, messages) + mime_type = "text/html" + extension = "html" + else: + raise ValueError(f"Unsupported 
export format: {format}") + + # Create filename + timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S") + filename = f"session_{session_id[:8]}_{timestamp}.{extension}" + + return ExportedSession( + format=format, + content=content, + filename=filename, + mime_type=mime_type, + size_bytes=len(content.encode()), + created_at=datetime.utcnow(), + ) + + async def _export_markdown(self, session: dict, messages: list) -> str: + """Export session as Markdown. + + Args: + session: Session metadata + messages: List of messages + + Returns: + Markdown formatted content + """ + lines = [] + + # Header + lines.append(f"# Claude Code Session Export") + lines.append(f"\n**Session ID:** `{session['id']}`") + lines.append(f"**Created:** {session['created_at']}") + if session.get("updated_at"): + lines.append(f"**Last Updated:** {session['updated_at']}") + lines.append(f"**Message Count:** {len(messages)}") + lines.append("\n---\n") + + # Messages + for msg in messages: + timestamp = msg["created_at"] + role = "You" if msg["role"] == "user" else "Claude" + content = msg["content"] + + lines.append(f"### {role} - {timestamp}") + lines.append(f"\n{content}\n") + lines.append("---\n") + + return "\n".join(lines) + + async def _export_json(self, session: dict, messages: list) -> str: + """Export session as JSON. 
+ + Args: + session: Session metadata + messages: List of messages + + Returns: + JSON formatted content + """ + export_data = { + "session": { + "id": session["id"], + "user_id": session["user_id"], + "created_at": session["created_at"].isoformat(), + "updated_at": ( + session.get("updated_at", "").isoformat() + if session.get("updated_at") + else None + ), + "message_count": len(messages), + }, + "messages": [ + { + "id": msg["id"], + "role": msg["role"], + "content": msg["content"], + "created_at": msg["created_at"].isoformat(), + } + for msg in messages + ], + } + + return json.dumps(export_data, indent=2, ensure_ascii=False) + + async def _export_html(self, session: dict, messages: list) -> str: + """Export session as HTML. + + Args: + session: Session metadata + messages: List of messages + + Returns: + HTML formatted content + """ + # Convert markdown content to HTML-safe format + markdown_content = await self._export_markdown(session, messages) + html_content = self._markdown_to_html(markdown_content) + + # HTML template + template = f""" + + + + + Claude Code Session - {session['id'][:8]} + + + +
+ {html_content} +
+ +""" + + return template + + def _markdown_to_html(self, markdown: str) -> str: + """Convert markdown to HTML. + + Simple conversion for basic markdown elements. + + Args: + markdown: Markdown content + + Returns: + HTML content + """ + html = markdown + + # Headers + html = html.replace("# ", "
<h1>
").replace("\n\n", "
</h1>
\n\n", 1) + html = html.replace("### ", "
<h3>
").replace("\n", "
</h3>
\n", 3) + + # Bold + import re + + html = re.sub(r"\*\*([^*]+)\*\*", r"\1", html) + + # Code blocks + html = re.sub(r"`([^`]+)`", r"\1", html) + + # Line breaks and paragraphs + html = html.replace("\n\n", "
</p>
\n
<p>
") + html = f"
<p>
{html}
</p>
" + + # Clean up empty paragraphs + html = html.replace("
<p></p>
", "") + html = html.replace("

", "") + html = html.replace("

", "") + + # Horizontal rules + html = html.replace("
<p>
---
</p>
", "
") + + return html diff --git a/src/bot/handlers/callback.py b/src/bot/handlers/callback.py index ac5fb022..f52381da 100644 --- a/src/bot/handlers/callback.py +++ b/src/bot/handlers/callback.py @@ -37,6 +37,10 @@ async def handle_callback_query( "action": handle_action_callback, "confirm": handle_confirm_callback, "quick": handle_quick_action_callback, + "followup": handle_followup_callback, + "conversation": handle_conversation_callback, + "git": handle_git_callback, + "export": handle_export_callback, } handler = handlers.get(action) @@ -59,14 +63,14 @@ async def handle_callback_query( try: await query.edit_message_text( - f"❌ **Error Processing Action**\n\n" + "❌ **Error Processing Action**\n\n" f"An error occurred while processing your request.\n" f"Please try again or use text commands." ) - except: + except Exception: # If we can't edit the message, send a new one await query.message.reply_text( - f"❌ **Error Processing Action**\n\n" + "❌ **Error Processing Action**\n\n" f"An error occurred while processing your request." 
) @@ -192,36 +196,6 @@ async def handle_action_callback( ) -async def handle_quick_action_callback( - query, action_type: str, context: ContextTypes.DEFAULT_TYPE -) -> None: - """Handle quick action buttons.""" - quick_actions = { - "test": "Run all tests in the current directory", - "install": "Install dependencies (detect package manager and run install command)", - "format": "Format all code files in the current directory", - "lint": "Run linter on all files and show any issues", - "git_status": "Show git status and recent commits", - "find_todos": "Find all TODO, FIXME, and NOTE comments in the codebase", - "build": "Build the project using the appropriate build system", - "start": "Start the development server", - } - - prompt = quick_actions.get(action_type) - if prompt: - await query.edit_message_text( - f"🚀 **Quick Action: {action_type.title()}**\n\n" - f"Request: _{prompt}_\n\n" - f"This will be processed once Claude Code integration is complete.\n\n" - f"**Current Status:**\n" - f"• Bot core: ✅ Ready\n" - f"• Claude integration: 🔄 In development\n\n" - f"_You can use text commands now to simulate this request._" - ) - else: - await query.edit_message_text(f"❌ **Unknown Quick Action: {action_type}**") - - async def handle_confirm_callback( query, confirmation_type: str, context: ContextTypes.DEFAULT_TYPE ) -> None: @@ -510,16 +484,18 @@ async def _handle_continue_action(query, context: ContextTypes.DEFAULT_TYPE) -> f"• Check your session status\n" f"• Navigate to a different directory", parse_mode="Markdown", - reply_markup=InlineKeyboardMarkup([ + reply_markup=InlineKeyboardMarkup( [ - InlineKeyboardButton( - "🆕 New Session", callback_data="action:new_session" - ), - InlineKeyboardButton( - "📊 Status", callback_data="action:status" - ), + [ + InlineKeyboardButton( + "🆕 New Session", callback_data="action:new_session" + ), + InlineKeyboardButton( + "📊 Status", callback_data="action:status" + ), + ] ] - ]) + ), ) except Exception as e: @@ -529,13 +505,15 @@ 
async def _handle_continue_action(query, context: ContextTypes.DEFAULT_TYPE) -> f"An error occurred: `{str(e)}`\n\n" f"Try starting a new session instead.", parse_mode="Markdown", - reply_markup=InlineKeyboardMarkup([ + reply_markup=InlineKeyboardMarkup( [ - InlineKeyboardButton( - "🆕 New Session", callback_data="action:new_session" - ) + [ + InlineKeyboardButton( + "🆕 New Session", callback_data="action:new_session" + ) + ] ] - ]) + ), ) @@ -765,6 +743,375 @@ async def _handle_export_action(query, context: ContextTypes.DEFAULT_TYPE) -> No ) +async def handle_followup_callback( + query, suggestion_hash: str, context: ContextTypes.DEFAULT_TYPE +) -> None: + """Handle follow-up suggestion callbacks.""" + user_id = query.from_user.id + + # Get conversation enhancer from bot data if available + conversation_enhancer = context.bot_data.get("conversation_enhancer") + + if not conversation_enhancer: + await query.edit_message_text( + "❌ **Follow-up Not Available**\n\n" + "Conversation enhancement features are not available." + ) + return + + try: + # Get stored suggestions (this would need to be implemented in the enhancer) + # For now, we'll provide a generic response + await query.edit_message_text( + "💡 **Follow-up Suggestion Selected**\n\n" + "This follow-up suggestion will be implemented once the conversation " + "enhancement system is fully integrated with the message handler.\n\n" + "**Current Status:**\n" + "• Suggestion received ✅\n" + "• Integration pending 🔄\n\n" + "_You can continue the conversation by sending a new message._" + ) + + logger.info( + "Follow-up suggestion selected", + user_id=user_id, + suggestion_hash=suggestion_hash, + ) + + except Exception as e: + logger.error( + "Error handling follow-up callback", + error=str(e), + user_id=user_id, + suggestion_hash=suggestion_hash, + ) + + await query.edit_message_text( + "❌ **Error Processing Follow-up**\n\n" + "An error occurred while processing your follow-up suggestion." 
+ ) + + +async def handle_conversation_callback( + query, action_type: str, context: ContextTypes.DEFAULT_TYPE +) -> None: + """Handle conversation control callbacks.""" + user_id = query.from_user.id + settings: Settings = context.bot_data["settings"] + + if action_type == "continue": + # Remove suggestion buttons and show continue message + await query.edit_message_text( + "✅ **Continuing Conversation**\n\n" + "Send me your next message to continue coding!\n\n" + "I'm ready to help with:\n" + "• Code review and debugging\n" + "• Feature implementation\n" + "• Architecture decisions\n" + "• Testing and optimization\n" + "• Documentation\n\n" + "_Just type your request or upload files._" + ) + + elif action_type == "end": + # End the current session + conversation_enhancer = context.bot_data.get("conversation_enhancer") + if conversation_enhancer: + conversation_enhancer.clear_context(user_id) + + # Clear session data + context.user_data["claude_session_id"] = None + context.user_data["session_started"] = False + + current_dir = context.user_data.get( + "current_directory", settings.approved_directory + ) + relative_path = current_dir.relative_to(settings.approved_directory) + + # Create quick action buttons + keyboard = [ + [ + InlineKeyboardButton( + "🆕 New Session", callback_data="action:new_session" + ), + InlineKeyboardButton( + "📁 Change Project", callback_data="action:show_projects" + ), + ], + [ + InlineKeyboardButton("📊 Status", callback_data="action:status"), + InlineKeyboardButton("❓ Help", callback_data="action:help"), + ], + ] + reply_markup = InlineKeyboardMarkup(keyboard) + + await query.edit_message_text( + "✅ **Conversation Ended**\n\n" + f"Your Claude session has been terminated.\n\n" + f"**Current Status:**\n" + f"• Directory: `{relative_path}/`\n" + f"• Session: None\n" + f"• Ready for new commands\n\n" + f"**Next Steps:**\n" + f"• Start a new session\n" + f"• Check status\n" + f"• Send any message to begin a new conversation", + 
parse_mode="Markdown", + reply_markup=reply_markup, + ) + + logger.info("Conversation ended via callback", user_id=user_id) + + else: + await query.edit_message_text( + f"❌ **Unknown Conversation Action: {action_type}**\n\n" + "This conversation action is not recognized." + ) + + +async def handle_git_callback( + query, git_action: str, context: ContextTypes.DEFAULT_TYPE +) -> None: + """Handle git-related callbacks.""" + user_id = query.from_user.id + settings: Settings = context.bot_data["settings"] + features = context.bot_data.get("features") + + if not features or not features.is_enabled("git"): + await query.edit_message_text( + "❌ **Git Integration Disabled**\n\n" + "Git integration feature is not enabled." + ) + return + + current_dir = context.user_data.get( + "current_directory", settings.approved_directory + ) + + try: + git_integration = features.get_git_integration() + if not git_integration: + await query.edit_message_text( + "❌ **Git Integration Unavailable**\n\n" + "Git integration service is not available." 
+ ) + return + + if git_action == "status": + # Refresh git status + git_status = await git_integration.get_status(current_dir) + status_message = git_integration.format_status(git_status) + + keyboard = [ + [ + InlineKeyboardButton("📊 Show Diff", callback_data="git:diff"), + InlineKeyboardButton("📜 Show Log", callback_data="git:log"), + ], + [ + InlineKeyboardButton("🔄 Refresh", callback_data="git:status"), + InlineKeyboardButton("📁 Files", callback_data="action:ls"), + ], + ] + reply_markup = InlineKeyboardMarkup(keyboard) + + await query.edit_message_text( + status_message, parse_mode="Markdown", reply_markup=reply_markup + ) + + elif git_action == "diff": + # Show git diff + diff_output = await git_integration.get_diff(current_dir) + + if not diff_output.strip(): + diff_message = "📊 **Git Diff**\n\n_No changes to show._" + else: + # Limit diff output + max_length = 2000 + if len(diff_output) > max_length: + diff_output = ( + diff_output[:max_length] + "\n\n_... output truncated ..._" + ) + + diff_message = f"📊 **Git Diff**\n\n```\n{diff_output}\n```" + + keyboard = [ + [ + InlineKeyboardButton("📜 Show Log", callback_data="git:log"), + InlineKeyboardButton("📊 Status", callback_data="git:status"), + ] + ] + reply_markup = InlineKeyboardMarkup(keyboard) + + await query.edit_message_text( + diff_message, parse_mode="Markdown", reply_markup=reply_markup + ) + + elif git_action == "log": + # Show git log + commits = await git_integration.get_file_history(current_dir, ".") + + if not commits: + log_message = "📜 **Git Log**\n\n_No commits found._" + else: + log_message = "📜 **Git Log**\n\n" + for commit in commits[:10]: # Show last 10 commits + short_hash = commit.hash[:7] + short_message = commit.message[:60] + if len(commit.message) > 60: + short_message += "..." 
+ log_message += f"• `{short_hash}` {short_message}\n" + + keyboard = [ + [ + InlineKeyboardButton("📊 Show Diff", callback_data="git:diff"), + InlineKeyboardButton("📊 Status", callback_data="git:status"), + ] + ] + reply_markup = InlineKeyboardMarkup(keyboard) + + await query.edit_message_text( + log_message, parse_mode="Markdown", reply_markup=reply_markup + ) + + else: + await query.edit_message_text( + f"❌ **Unknown Git Action: {git_action}**\n\n" + "This git action is not recognized." + ) + + except Exception as e: + logger.error( + "Error in git callback", + error=str(e), + git_action=git_action, + user_id=user_id, + ) + await query.edit_message_text(f"❌ **Git Error**\n\n{str(e)}") + + +async def handle_export_callback( + query, export_format: str, context: ContextTypes.DEFAULT_TYPE +) -> None: + """Handle export format selection callbacks.""" + user_id = query.from_user.id + features = context.bot_data.get("features") + + if export_format == "cancel": + await query.edit_message_text( + "📤 **Export Cancelled**\n\n" "Session export has been cancelled." + ) + return + + session_exporter = features.get_session_export() if features else None + if not session_exporter: + await query.edit_message_text( + "❌ **Export Unavailable**\n\n" "Session export service is not available." + ) + return + + # Get current session + claude_session_id = context.user_data.get("claude_session_id") + if not claude_session_id: + await query.edit_message_text( + "❌ **No Active Session**\n\n" "There's no active session to export." 
+ ) + return + + try: + # Show processing message + await query.edit_message_text( + f"📤 **Exporting Session**\n\n" + f"Generating {export_format.upper()} export...", + parse_mode="Markdown", + ) + + # Export session + exported_session = await session_exporter.export_session( + claude_session_id, export_format + ) + + # Send the exported file + from io import BytesIO + + file_bytes = BytesIO(exported_session.content.encode("utf-8")) + file_bytes.name = exported_session.filename + + await query.message.reply_document( + document=file_bytes, + filename=exported_session.filename, + caption=( + f"📤 **Session Export Complete**\n\n" + f"Format: {exported_session.format.upper()}\n" + f"Size: {exported_session.size_bytes:,} bytes\n" + f"Created: {exported_session.created_at.strftime('%Y-%m-%d %H:%M:%S')}" + ), + parse_mode="Markdown", + ) + + # Update the original message + await query.edit_message_text( + f"✅ **Export Complete**\n\n" + f"Your session has been exported as {exported_session.filename}.\n" + f"Check the file above for your complete conversation history.", + parse_mode="Markdown", + ) + + except Exception as e: + logger.error( + "Export failed", error=str(e), user_id=user_id, format=export_format + ) + await query.edit_message_text(f"❌ **Export Failed**\n\n{str(e)}") + + +async def handle_quick_action_callback( + query, action_id: str, context: ContextTypes.DEFAULT_TYPE +) -> None: + """Handle quick action callbacks.""" + # For now, just show a message that the action isn't implemented + await query.edit_message_text( + f"❌ **Quick Action Not Implemented**\n\n" + f"The quick action '{action_id}' is not yet implemented.\n" + f"Please use text commands instead.", + parse_mode="Markdown", + ) + + +async def handle_followup_callback( + query, action_id: str, context: ContextTypes.DEFAULT_TYPE +) -> None: + """Handle followup action callbacks.""" + await query.edit_message_text( + f"❌ **Followup Action Not Implemented**\n\n" + f"The followup action '{action_id}' is 
not yet implemented.\n" + f"Please use text commands instead.", + parse_mode="Markdown", + ) + + +async def handle_conversation_callback( + query, action: str, context: ContextTypes.DEFAULT_TYPE +) -> None: + """Handle conversation mode callbacks.""" + await query.edit_message_text( + f"❌ **Conversation Action Not Implemented**\n\n" + f"The conversation action '{action}' is not yet implemented.\n" + f"Please use text commands instead.", + parse_mode="Markdown", + ) + + +async def handle_export_callback( + query, format: str, context: ContextTypes.DEFAULT_TYPE +) -> None: + """Handle export callbacks.""" + await query.edit_message_text( + f"❌ **Export Not Implemented**\n\n" + f"Export to {format} format is not yet implemented.\n" + f"Please use text commands instead.", + parse_mode="Markdown", + ) + + def _format_file_size(size: int) -> str: """Format file size in human-readable format.""" for unit in ["B", "KB", "MB", "GB"]: diff --git a/src/bot/handlers/command.py b/src/bot/handlers/command.py index 81774e85..c4536193 100644 --- a/src/bot/handlers/command.py +++ b/src/bot/handlers/command.py @@ -1,8 +1,5 @@ """Command handlers for bot operations.""" -from pathlib import Path -from typing import List - import structlog from telegram import InlineKeyboardButton, InlineKeyboardMarkup, Update from telegram.ext import ContextTypes @@ -28,7 +25,9 @@ async def start_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> N f"• `/ls` - List files in current directory\n" f"• `/cd ` - Change directory\n" f"• `/projects` - Show available projects\n" - f"• `/status` - Show session status\n\n" + f"• `/status` - Show session status\n" + f"• `/actions` - Show quick actions\n" + f"• `/git` - Git repository commands\n\n" f"**Quick Start:**\n" f"1. Use `/projects` to see available projects\n" f"2. 
Use `/cd ` to navigate to a project\n" @@ -78,7 +77,9 @@ async def help_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> No "• `/continue [message]` - Continue last session (optionally with message)\n" "• `/end` - End current session\n" "• `/status` - Show session and usage status\n" - "• `/export` - Export session history\n\n" + "• `/export` - Export session history\n" + "• `/actions` - Show context-aware quick actions\n" + "• `/git` - Git repository information\n\n" "**Usage Examples:**\n" "• `cd myproject` - Enter project directory\n" "• `ls` - See what's in current directory\n" @@ -106,7 +107,6 @@ async def help_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> No async def new_session(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: """Handle /new command.""" - user_id = update.effective_user.id settings: Settings = context.bot_data["settings"] # For now, we'll use a simple session concept @@ -151,14 +151,13 @@ async def new_session(update: Update, context: ContextTypes.DEFAULT_TYPE) -> Non async def continue_session(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: """Handle /continue command with optional prompt.""" - user_id = update.effective_user.id settings: Settings = context.bot_data["settings"] claude_integration: ClaudeIntegration = context.bot_data.get("claude_integration") audit_logger: AuditLogger = context.bot_data.get("audit_logger") # Parse optional prompt from command arguments prompt = " ".join(context.args) if context.args else None - + current_dir = context.user_data.get( "current_directory", settings.approved_directory ) @@ -211,12 +210,13 @@ async def continue_session(update: Update, context: ContextTypes.DEFAULT_TYPE) - # Delete status message and send response await status_msg.delete() - + # Format and send Claude's response from ..utils.formatting import ResponseFormatter + formatter = ResponseFormatter() formatted_messages = formatter.format_claude_response(claude_response) - + for msg 
in formatted_messages: await update.message.reply_text( msg.content, @@ -227,10 +227,10 @@ async def continue_session(update: Update, context: ContextTypes.DEFAULT_TYPE) - # Log successful continue if audit_logger: await audit_logger.log_command( - user_id=user_id, - command="continue", - args=context.args or [], - success=True + user_id=user_id, + command="continue", + args=context.args or [], + success=True, ) else: @@ -244,16 +244,18 @@ async def continue_session(update: Update, context: ContextTypes.DEFAULT_TYPE) - f"• Use `/status` to check your sessions\n" f"• Navigate to a different directory with `/cd`", parse_mode="Markdown", - reply_markup=InlineKeyboardMarkup([ + reply_markup=InlineKeyboardMarkup( [ - InlineKeyboardButton( - "🆕 New Session", callback_data="action:new_session" - ), - InlineKeyboardButton( - "📊 Status", callback_data="action:status" - ), + [ + InlineKeyboardButton( + "🆕 New Session", callback_data="action:new_session" + ), + InlineKeyboardButton( + "📊 Status", callback_data="action:status" + ), + ] ] - ]) + ), ) except Exception as e: @@ -262,9 +264,9 @@ async def continue_session(update: Update, context: ContextTypes.DEFAULT_TYPE) - # Delete status message if it exists try: - if 'status_msg' in locals(): + if "status_msg" in locals(): await status_msg.delete() - except: + except Exception: pass # Send error response @@ -282,16 +284,15 @@ async def continue_session(update: Update, context: ContextTypes.DEFAULT_TYPE) - # Log failed continue if audit_logger: await audit_logger.log_command( - user_id=user_id, - command="continue", - args=context.args or [], - success=False + user_id=user_id, + command="continue", + args=context.args or [], + success=False, ) async def list_files(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: """Handle /ls command.""" - user_id = update.effective_user.id settings: Settings = context.bot_data["settings"] audit_logger: AuditLogger = context.bot_data.get("audit_logger") @@ -383,7 +384,6 @@ async def 
list_files(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None async def change_directory(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: """Handle /cd command.""" - user_id = update.effective_user.id settings: Settings = context.bot_data["settings"] security_validator: SecurityValidator = context.bot_data.get("security_validator") audit_logger: AuditLogger = context.bot_data.get("audit_logger") @@ -574,7 +574,6 @@ async def show_projects(update: Update, context: ContextTypes.DEFAULT_TYPE) -> N async def session_status(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: """Handle /status command.""" - user_id = update.effective_user.id settings: Settings = context.bot_data["settings"] # Get session info @@ -648,21 +647,62 @@ async def session_status(update: Update, context: ContextTypes.DEFAULT_TYPE) -> async def export_session(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: """Handle /export command.""" - # For now, this is a placeholder since we haven't implemented session storage yet + user_id = update.effective_user.id + features = context.bot_data.get("features") + + # Check if session export is available + session_exporter = features.get_session_export() if features else None + + if not session_exporter: + await update.message.reply_text( + "📤 **Export Session**\n\n" + "Session export functionality is not available.\n\n" + "**Planned features:**\n" + "• Export conversation history\n" + "• Save session state\n" + "• Share conversations\n" + "• Create session backups" + ) + return + + # Get current session + claude_session_id = context.user_data.get("claude_session_id") + + if not claude_session_id: + await update.message.reply_text( + "❌ **No Active Session**\n\n" + "There's no active Claude session to export.\n\n" + "**What you can do:**\n" + "• Start a new session with `/new`\n" + "• Continue an existing session with `/continue`\n" + "• Check your status with `/status`" + ) + return + + # Create export format 
selection keyboard + keyboard = [ + [ + InlineKeyboardButton("📝 Markdown", callback_data="export:markdown"), + InlineKeyboardButton("🌐 HTML", callback_data="export:html"), + ], + [ + InlineKeyboardButton("📋 JSON", callback_data="export:json"), + InlineKeyboardButton("❌ Cancel", callback_data="export:cancel"), + ], + ] + reply_markup = InlineKeyboardMarkup(keyboard) + await update.message.reply_text( "📤 **Export Session**\n\n" - "Session export functionality will be available once the storage layer is implemented.\n\n" - "This will allow you to:\n" - "• Export conversation history\n" - "• Save session state\n" - "• Share conversations\n" - "• Create session backups" + f"Ready to export session: `{claude_session_id[:8]}...`\n\n" + "**Choose export format:**", + parse_mode="Markdown", + reply_markup=reply_markup, ) async def end_session(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: """Handle /end command to terminate the current session.""" - user_id = update.effective_user.id settings: Settings = context.bot_data["settings"] # Check if there's an active session @@ -723,6 +763,163 @@ async def end_session(update: Update, context: ContextTypes.DEFAULT_TYPE) -> Non logger.info("Session ended by user", user_id=user_id, session_id=claude_session_id) +async def quick_actions(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + """Handle /actions command to show quick actions.""" + settings: Settings = context.bot_data["settings"] + features = context.bot_data.get("features") + + if not features or not features.is_enabled("quick_actions"): + await update.message.reply_text( + "❌ **Quick Actions Disabled**\n\n" + "Quick actions feature is not enabled.\n" + "Contact your administrator to enable this feature." 
+ ) + return + + # Get current directory + current_dir = context.user_data.get( + "current_directory", settings.approved_directory + ) + + try: + quick_action_manager = features.get_quick_actions() + if not quick_action_manager: + await update.message.reply_text( + "❌ **Quick Actions Unavailable**\n\n" + "Quick actions service is not available." + ) + return + + # Get context-aware actions + actions = await quick_action_manager.get_suggestions( + session_data={"working_directory": str(current_dir), "user_id": user_id} + ) + + if not actions: + await update.message.reply_text( + "🤖 **No Actions Available**\n\n" + "No quick actions are available for the current context.\n\n" + "**Try:**\n" + "• Navigating to a project directory with `/cd`\n" + "• Creating some code files\n" + "• Starting a Claude session with `/new`" + ) + return + + # Create inline keyboard + keyboard = quick_action_manager.create_inline_keyboard(actions, max_columns=2) + + relative_path = current_dir.relative_to(settings.approved_directory) + await update.message.reply_text( + f"⚡ **Quick Actions**\n\n" + f"📂 Context: `{relative_path}/`\n\n" + f"Select an action to execute:", + parse_mode="Markdown", + reply_markup=keyboard, + ) + + except Exception as e: + await update.message.reply_text(f"❌ **Error Loading Actions**\n\n{str(e)}") + logger.error("Error in quick_actions command", error=str(e), user_id=user_id) + + +async def git_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + """Handle /git command to show git repository information.""" + settings: Settings = context.bot_data["settings"] + features = context.bot_data.get("features") + + if not features or not features.is_enabled("git"): + await update.message.reply_text( + "❌ **Git Integration Disabled**\n\n" + "Git integration feature is not enabled.\n" + "Contact your administrator to enable this feature." 
+ ) + return + + # Get current directory + current_dir = context.user_data.get( + "current_directory", settings.approved_directory + ) + + try: + git_integration = features.get_git_integration() + if not git_integration: + await update.message.reply_text( + "❌ **Git Integration Unavailable**\n\n" + "Git integration service is not available." + ) + return + + # Check if current directory is a git repository + if not (current_dir / ".git").exists(): + await update.message.reply_text( + f"📂 **Not a Git Repository**\n\n" + f"Current directory `{current_dir.relative_to(settings.approved_directory)}/` is not a git repository.\n\n" + f"**Options:**\n" + f"• Navigate to a git repository with `/cd`\n" + f"• Initialize a new repository (ask Claude to help)\n" + f"• Clone an existing repository (ask Claude to help)" + ) + return + + # Get git status + git_status = await git_integration.get_status(current_dir) + + # Format status message + relative_path = current_dir.relative_to(settings.approved_directory) + status_message = f"🔗 **Git Repository Status**\n\n" + status_message += f"📂 Directory: `{relative_path}/`\n" + status_message += f"🌿 Branch: `{git_status.branch}`\n" + + if git_status.ahead > 0: + status_message += f"⬆️ Ahead: {git_status.ahead} commits\n" + if git_status.behind > 0: + status_message += f"⬇️ Behind: {git_status.behind} commits\n" + + # Show file changes + if git_status.has_changes(): + status_message += f"\n**Changes:**\n" + if git_status.modified: + status_message += f"📝 Modified: {len(git_status.modified)} files\n" + if git_status.added: + status_message += f"➕ Added: {len(git_status.added)} files\n" + if git_status.deleted: + status_message += f"➖ Deleted: {len(git_status.deleted)} files\n" + if git_status.untracked: + status_message += f"❓ Untracked: {len(git_status.untracked)} files\n" + else: + status_message += "\n✅ Working directory clean\n" + + # Show recent commits + if git_status.recent_commits: + status_message += f"\n**Recent Commits:**\n" + 
for commit in git_status.recent_commits[:3]: # Show last 3 + short_hash = commit.hash[:7] + status_message += f"• `{short_hash}` {commit.message[:50]}{'...' if len(commit.message) > 50 else ''}\n" + + # Create action buttons + keyboard = [ + [ + InlineKeyboardButton("📊 Show Diff", callback_data="git:diff"), + InlineKeyboardButton("📜 Show Log", callback_data="git:log"), + ], + [ + InlineKeyboardButton("🔄 Refresh", callback_data="git:status"), + InlineKeyboardButton("📁 Files", callback_data="action:ls"), + ], + ] + + reply_markup = InlineKeyboardMarkup(keyboard) + + await update.message.reply_text( + status_message, parse_mode="Markdown", reply_markup=reply_markup + ) + + except Exception as e: + await update.message.reply_text(f"❌ **Git Error**\n\n{str(e)}") + logger.error("Error in git_command", error=str(e), user_id=user_id) + + def _format_file_size(size: int) -> str: """Format file size in human-readable format.""" for unit in ["B", "KB", "MB", "GB"]: diff --git a/src/bot/handlers/message.py b/src/bot/handlers/message.py index 8eeca8a9..d2fc4970 100644 --- a/src/bot/handlers/message.py +++ b/src/bot/handlers/message.py @@ -16,6 +16,73 @@ logger = structlog.get_logger() +async def _format_progress_update(update_obj) -> Optional[str]: + """Format progress updates with enhanced context and visual indicators.""" + if update_obj.type == "tool_result": + # Show tool completion status + tool_name = "Unknown" + if update_obj.metadata and update_obj.metadata.get("tool_use_id"): + # Try to extract tool name from context if available + tool_name = update_obj.metadata.get("tool_name", "Tool") + + if update_obj.is_error(): + return f"❌ **{tool_name} failed**\n\n_{update_obj.get_error_message()}_" + else: + execution_time = "" + if update_obj.metadata and update_obj.metadata.get("execution_time_ms"): + time_ms = update_obj.metadata["execution_time_ms"] + execution_time = f" ({time_ms}ms)" + return f"✅ **{tool_name} completed**{execution_time}" + + elif update_obj.type == 
"progress": + # Handle progress updates + progress_text = f"🔄 **{update_obj.content or 'Working...'}**" + + percentage = update_obj.get_progress_percentage() + if percentage is not None: + # Create a simple progress bar + filled = int(percentage / 10) # 0-10 scale + bar = "█" * filled + "░" * (10 - filled) + progress_text += f"\n\n`{bar}` {percentage}%" + + if update_obj.progress: + step = update_obj.progress.get("step") + total_steps = update_obj.progress.get("total_steps") + if step and total_steps: + progress_text += f"\n\nStep {step} of {total_steps}" + + return progress_text + + elif update_obj.type == "error": + # Handle error messages + return f"❌ **Error**\n\n_{update_obj.get_error_message()}_" + + elif update_obj.type == "assistant" and update_obj.tool_calls: + # Show when tools are being called + tool_names = update_obj.get_tool_names() + if tool_names: + tools_text = ", ".join(tool_names) + return f"🔧 **Using tools:** {tools_text}" + + elif update_obj.type == "assistant" and update_obj.content: + # Regular content updates with preview + content_preview = ( + update_obj.content[:150] + "..." 
+ if len(update_obj.content) > 150 + else update_obj.content + ) + return f"🤖 **Claude is working...**\n\n_{content_preview}_" + + elif update_obj.type == "system": + # System initialization or other system messages + if update_obj.metadata and update_obj.metadata.get("subtype") == "init": + tools_count = len(update_obj.metadata.get("tools", [])) + model = update_obj.metadata.get("model", "Claude") + return f"🚀 **Starting {model}** with {tools_count} tools available" + + return None + + def _format_error_message(error_str: str) -> str: """Format error messages for user-friendly display.""" if "usage limit reached" in error_str.lower(): @@ -118,20 +185,12 @@ async def handle_text_message( # Get existing session ID session_id = context.user_data.get("claude_session_id") - # Stream updates handler + # Enhanced stream updates handler with progress tracking async def stream_handler(update_obj): try: - if update_obj.content: - # Update progress message with streaming content - content_preview = ( - update_obj.content[:100] + "..." 
- if len(update_obj.content) > 100 - else update_obj.content - ) - await progress_msg.edit_text( - f"🤖 **Claude is working...**\n\n" f"_{content_preview}_", - parse_mode="Markdown", - ) + progress_text = await _format_progress_update(update_obj) + if progress_text: + await progress_msg.edit_text(progress_text, parse_mode="Markdown") except Exception as e: logger.warning("Failed to update progress message", error=str(e)) @@ -225,6 +284,52 @@ async def stream_handler(update_obj): # Update session info context.user_data["last_message"] = update.message.text + # Add conversation enhancements if available + features = context.bot_data.get("features") + conversation_enhancer = ( + features.get_conversation_enhancer() if features else None + ) + + if conversation_enhancer and claude_response: + try: + # Update conversation context + conversation_context = conversation_enhancer.update_context( + session_id=claude_response.session_id, + user_id=user_id, + working_directory=str(current_dir), + tools_used=claude_response.tools_used or [], + response_content=claude_response.content, + ) + + # Check if we should show follow-up suggestions + if conversation_enhancer.should_show_suggestions( + claude_response.tools_used or [], claude_response.content + ): + # Generate follow-up suggestions + suggestions = conversation_enhancer.generate_follow_up_suggestions( + claude_response.content, + claude_response.tools_used or [], + conversation_context, + ) + + if suggestions: + # Create keyboard with suggestions + suggestion_keyboard = ( + conversation_enhancer.create_follow_up_keyboard(suggestions) + ) + + # Send follow-up suggestions + await update.message.reply_text( + "💡 **What would you like to do next?**", + parse_mode="Markdown", + reply_markup=suggestion_keyboard, + ) + + except Exception as e: + logger.warning( + "Conversation enhancement failed", error=str(e), user_id=user_id + ) + # Log successful message processing if audit_logger: await audit_logger.log_command( @@ -324,37 
+429,202 @@ async def handle_document(update: Update, context: ContextTypes.DEFAULT_TYPE) -> f"📄 Processing file: `{document.file_name}`...", parse_mode="Markdown" ) - # Download and process file - file = await document.get_file() - file_bytes = await file.download_as_bytearray() + # Check if enhanced file handler is available + features = context.bot_data.get("features") + file_handler = features.get_file_handler() if features else None + + if file_handler: + # Use enhanced file handler + try: + processed_file = await file_handler.handle_document_upload( + document, + user_id, + update.message.caption or "Please review this file:", + ) + prompt = processed_file.prompt + + # Update progress message with file type info + await progress_msg.edit_text( + f"📄 Processing {processed_file.type} file: `{document.file_name}`...", + parse_mode="Markdown", + ) + + except Exception as e: + logger.warning( + "Enhanced file handler failed, falling back to basic handler", + error=str(e), + ) + file_handler = None # Fall back to basic handling + + if not file_handler: + # Fall back to basic file handling + file = await document.get_file() + file_bytes = await file.download_as_bytearray() + + # Try to decode as text + try: + content = file_bytes.decode("utf-8") + + # Check content length + max_content_length = 50000 # 50KB of text + if len(content) > max_content_length: + content = ( + content[:max_content_length] + + "\n... 
(file truncated for processing)" + ) + + # Create prompt with file content + caption = update.message.caption or "Please review this file:" + prompt = f"{caption}\n\n**File:** `{document.file_name}`\n\n```\n{content}\n```" + + except UnicodeDecodeError: + await progress_msg.edit_text( + "❌ **File Format Not Supported**\n\n" + "File must be text-based and UTF-8 encoded.\n\n" + "**Supported formats:**\n" + "• Source code files (.py, .js, .ts, etc.)\n" + "• Text files (.txt, .md)\n" + "• Configuration files (.json, .yaml, .toml)\n" + "• Documentation files" + ) + return + + # Delete progress message + await progress_msg.delete() + + # Create a new progress message for Claude processing + claude_progress_msg = await update.message.reply_text( + "🤖 Processing file with Claude...", parse_mode="Markdown" + ) + + # Get Claude integration from context + claude_integration = context.bot_data.get("claude_integration") - # Try to decode as text + if not claude_integration: + await claude_progress_msg.edit_text( + "❌ **Claude integration not available**\n\n" + "The Claude Code integration is not properly configured.", + parse_mode="Markdown", + ) + return + + # Get current directory and session + current_dir = context.user_data.get( + "current_directory", settings.approved_directory + ) + session_id = context.user_data.get("claude_session_id") + + # Process with Claude try: - content = file_bytes.decode("utf-8") - - # Check content length - max_content_length = 50000 # 50KB of text - if len(content) > max_content_length: - content = ( - content[:max_content_length] - + "\n... 
(file truncated for processing)" + claude_response = await claude_integration.run_command( + prompt=prompt, + working_directory=current_dir, + user_id=user_id, + session_id=session_id, + ) + + # Update session ID + context.user_data["claude_session_id"] = claude_response.session_id + + # Check if Claude changed the working directory and update our tracking + _update_working_directory_from_claude_response( + claude_response, context, settings, user_id + ) + + # Format and send response + from ..utils.formatting import ResponseFormatter + + formatter = ResponseFormatter(settings) + formatted_messages = formatter.format_claude_response( + claude_response.content + ) + + # Delete progress message + await claude_progress_msg.delete() + + # Send responses + for i, message in enumerate(formatted_messages): + await update.message.reply_text( + message.text, + parse_mode=message.parse_mode, + reply_markup=message.reply_markup, + reply_to_message_id=(update.message.message_id if i == 0 else None), ) - # Create prompt with file content - caption = update.message.caption or "Please review this file:" - prompt = ( - f"{caption}\n\n**File:** `{document.file_name}`\n\n```\n{content}\n```" + if i < len(formatted_messages) - 1: + await asyncio.sleep(0.5) + + except Exception as e: + await claude_progress_msg.edit_text( + _format_error_message(str(e)), parse_mode="Markdown" + ) + logger.error("Claude file processing failed", error=str(e), user_id=user_id) + + # Log successful file processing + if audit_logger: + await audit_logger.log_file_access( + user_id=user_id, + file_path=document.file_name, + action="upload_processed", + success=True, + file_size=document.file_size, + ) + + except Exception as e: + try: + await progress_msg.delete() + except: + pass + + error_msg = f"❌ **Error processing file**\n\n{str(e)}" + await update.message.reply_text(error_msg, parse_mode="Markdown") + + # Log failed file processing + if audit_logger: + await audit_logger.log_file_access( + 
user_id=user_id, + file_path=document.file_name, + action="upload_failed", + success=False, + file_size=document.file_size, + ) + + logger.error("Error processing document", error=str(e), user_id=user_id) + + +async def handle_photo(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + """Handle photo uploads.""" + user_id = update.effective_user.id + settings: Settings = context.bot_data["settings"] + + # Check if enhanced image handler is available + features = context.bot_data.get("features") + image_handler = features.get_image_handler() if features else None + + if image_handler: + try: + # Send processing indicator + progress_msg = await update.message.reply_text( + "📸 Processing image...", parse_mode="Markdown" + ) + + # Get the largest photo size + photo = update.message.photo[-1] + + # Process image with enhanced handler + processed_image = await image_handler.process_image( + photo, update.message.caption ) # Delete progress message await progress_msg.delete() - # Create a new progress message for Claude processing + # Create Claude progress message claude_progress_msg = await update.message.reply_text( - "🤖 Processing file with Claude...", parse_mode="Markdown" + "🤖 Analyzing image with Claude...", parse_mode="Markdown" ) - # Get Claude integration from context + # Get Claude integration claude_integration = context.bot_data.get("claude_integration") if not claude_integration: @@ -374,7 +644,7 @@ async def handle_document(update: Update, context: ContextTypes.DEFAULT_TYPE) -> # Process with Claude try: claude_response = await claude_integration.run_command( - prompt=prompt, + prompt=processed_image.prompt, working_directory=current_dir, user_id=user_id, session_id=session_id, @@ -383,11 +653,6 @@ async def handle_document(update: Update, context: ContextTypes.DEFAULT_TYPE) -> # Update session ID context.user_data["claude_session_id"] = claude_response.session_id - # Check if Claude changed the working directory and update our tracking - 
_update_working_directory_from_claude_response( - claude_response, context, settings, user_id - ) - # Format and send response from ..utils.formatting import ResponseFormatter @@ -418,66 +683,28 @@ async def handle_document(update: Update, context: ContextTypes.DEFAULT_TYPE) -> _format_error_message(str(e)), parse_mode="Markdown" ) logger.error( - "Claude file processing failed", error=str(e), user_id=user_id - ) - - # Log successful file processing - if audit_logger: - await audit_logger.log_file_access( - user_id=user_id, - file_path=document.file_name, - action="upload_processed", - success=True, - file_size=document.file_size, + "Claude image processing failed", error=str(e), user_id=user_id ) - except UnicodeDecodeError: - await progress_msg.edit_text( - "❌ **File Format Not Supported**\n\n" - "File must be text-based and UTF-8 encoded.\n\n" - "**Supported formats:**\n" - "• Source code files (.py, .js, .ts, etc.)\n" - "• Text files (.txt, .md)\n" - "• Configuration files (.json, .yaml, .toml)\n" - "• Documentation files" - ) - - except Exception as e: - try: - await progress_msg.delete() - except: - pass - - error_msg = f"❌ **Error processing file**\n\n{str(e)}" - await update.message.reply_text(error_msg, parse_mode="Markdown") - - # Log failed file processing - if audit_logger: - await audit_logger.log_file_access( - user_id=user_id, - file_path=document.file_name, - action="upload_failed", - success=False, - file_size=document.file_size, + except Exception as e: + logger.error("Image processing failed", error=str(e), user_id=user_id) + await update.message.reply_text( + f"❌ **Error processing image**\n\n{str(e)}", parse_mode="Markdown" ) - - logger.error("Error processing document", error=str(e), user_id=user_id) - - -async def handle_photo(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: - """Handle photo uploads.""" - await update.message.reply_text( - "📸 **Photo Upload**\n\n" - "Photo processing is not yet supported.\n\n" - "**Currently 
supported:**\n" - "• Text files (.py, .js, .md, etc.)\n" - "• Configuration files\n" - "• Documentation files\n\n" - "**Coming soon:**\n" - "• Image analysis\n" - "• Screenshot processing\n" - "• Diagram interpretation" - ) + else: + # Fall back to unsupported message + await update.message.reply_text( + "📸 **Photo Upload**\n\n" + "Photo processing is not yet supported.\n\n" + "**Currently supported:**\n" + "• Text files (.py, .js, .md, etc.)\n" + "• Configuration files\n" + "• Documentation files\n\n" + "**Coming soon:**\n" + "• Image analysis\n" + "• Screenshot processing\n" + "• Diagram interpretation" + ) def _estimate_text_processing_cost(text: str) -> float: diff --git a/src/bot/utils/formatting.py b/src/bot/utils/formatting.py index 3ee34b24..48fc3800 100644 --- a/src/bot/utils/formatting.py +++ b/src/bot/utils/formatting.py @@ -31,22 +31,53 @@ def __init__(self, settings: Settings): self.max_message_length = 4000 # Telegram limit is 4096, leave some buffer self.max_code_block_length = 3000 # Max length for code blocks - def format_claude_response(self, text: str) -> List[FormattedMessage]: - """Format Claude response into one or more Telegram messages.""" + def format_claude_response( + self, text: str, context: Optional[dict] = None + ) -> List[FormattedMessage]: + """Enhanced formatting with context awareness and semantic chunking.""" # Clean and prepare text text = self._clean_text(text) - # Handle code blocks specially - text = self._format_code_blocks(text) - - # Split into messages if too long - messages = self._split_message(text) - - # Add quick actions to the last message if enabled - if self.settings.enable_quick_actions and messages: - messages[-1].reply_markup = self._get_quick_actions_keyboard() + # Check if we need semantic chunking (for complex content) + if self._should_use_semantic_chunking(text): + # Use enhanced semantic chunking for complex content + chunks = self._semantic_chunk(text, context) + messages = [] + for chunk in chunks: + 
formatted = self._format_chunk(chunk) + messages.extend(formatted) + else: + # Use original simple formatting for basic content + text = self._format_code_blocks(text) + messages = self._split_message(text) + + # Add context-aware quick actions to the last message + if messages and self.settings.enable_quick_actions: + messages[-1].reply_markup = self._get_contextual_keyboard(context) + + return messages if messages else [FormattedMessage("_(No content to display)_")] + + def _should_use_semantic_chunking(self, text: str) -> bool: + """Determine if semantic chunking is needed.""" + # Use semantic chunking for complex content with multiple code blocks, + # file operations, or very long text + code_block_count = text.count("```") + has_file_operations = any( + indicator in text + for indicator in [ + "Creating file", + "Editing file", + "Reading file", + "Writing to", + "Modified file", + "Deleted file", + "File created", + "File updated", + ] + ) + is_very_long = len(text) > self.max_message_length * 2 - return messages + return code_block_count > 2 or has_file_operations or is_very_long def format_error_message( self, error: str, error_type: str = "Error" @@ -137,6 +168,261 @@ def format_progress_message( return FormattedMessage(text, parse_mode="Markdown") + def _semantic_chunk(self, text: str, context: Optional[dict]) -> List[dict]: + """Split text into semantic chunks based on content type.""" + chunks = [] + + # Identify different content sections + sections = self._identify_sections(text) + + for section in sections: + if section["type"] == "code_block": + chunks.extend(self._chunk_code_block(section)) + elif section["type"] == "explanation": + chunks.extend(self._chunk_explanation(section)) + elif section["type"] == "file_operations": + chunks.append(self._format_file_operations_section(section)) + elif section["type"] == "mixed": + chunks.extend(self._chunk_mixed_content(section)) + else: + # Default text chunking + chunks.extend(self._chunk_text(section)) + 
+ return chunks + + def _identify_sections(self, text: str) -> List[dict]: + """Identify different content types in the text.""" + sections = [] + lines = text.split("\n") + current_section = {"type": "text", "content": "", "start_line": 0} + in_code_block = False + code_start = 0 + + for i, line in enumerate(lines): + # Check for code block markers + if line.strip().startswith("```"): + if not in_code_block: + # Start of code block + if current_section["content"].strip(): + sections.append(current_section) + in_code_block = True + code_start = i + current_section = { + "type": "code_block", + "content": line + "\n", + "start_line": i, + } + else: + # End of code block + current_section["content"] += line + "\n" + sections.append(current_section) + in_code_block = False + current_section = { + "type": "text", + "content": "", + "start_line": i + 1, + } + elif in_code_block: + current_section["content"] += line + "\n" + else: + # Check for file operation patterns + if self._is_file_operation_line(line): + if current_section["type"] != "file_operations": + if current_section["content"].strip(): + sections.append(current_section) + current_section = { + "type": "file_operations", + "content": line + "\n", + "start_line": i, + } + else: + current_section["content"] += line + "\n" + else: + # Regular text + if current_section["type"] != "text": + if current_section["content"].strip(): + sections.append(current_section) + current_section = { + "type": "text", + "content": line + "\n", + "start_line": i, + } + else: + current_section["content"] += line + "\n" + + # Add the last section + if current_section["content"].strip(): + sections.append(current_section) + + return sections + + def _is_file_operation_line(self, line: str) -> bool: + """Check if a line indicates file operations.""" + file_indicators = [ + "Creating file", + "Editing file", + "Reading file", + "Writing to", + "Modified file", + "Deleted file", + "File created", + "File updated", + ] + return 
any(indicator in line for indicator in file_indicators) + + def _chunk_code_block(self, section: dict) -> List[dict]: + """Handle code block chunking.""" + content = section["content"] + if len(content) <= self.max_code_block_length: + return [{"type": "code_block", "content": content, "format": "single"}] + + # Split large code blocks + chunks = [] + lines = content.split("\n") + current_chunk = lines[0] + "\n" # Start with the ``` line + + for line in lines[1:-1]: # Skip first and last ``` lines + if len(current_chunk + line + "\n```\n") > self.max_code_block_length: + current_chunk += "```" + chunks.append( + {"type": "code_block", "content": current_chunk, "format": "split"} + ) + current_chunk = "```\n" + line + "\n" + else: + current_chunk += line + "\n" + + current_chunk += lines[-1] # Add the closing ``` + chunks.append( + {"type": "code_block", "content": current_chunk, "format": "split"} + ) + + return chunks + + def _chunk_explanation(self, section: dict) -> List[dict]: + """Handle explanation text chunking.""" + content = section["content"] + if len(content) <= self.max_message_length: + return [{"type": "explanation", "content": content}] + + # Split by paragraphs first + paragraphs = content.split("\n\n") + chunks = [] + current_chunk = "" + + for paragraph in paragraphs: + if len(current_chunk + paragraph + "\n\n") > self.max_message_length: + if current_chunk: + chunks.append( + {"type": "explanation", "content": current_chunk.strip()} + ) + current_chunk = paragraph + "\n\n" + else: + current_chunk += paragraph + "\n\n" + + if current_chunk: + chunks.append({"type": "explanation", "content": current_chunk.strip()}) + + return chunks + + def _chunk_mixed_content(self, section: dict) -> List[dict]: + """Handle mixed content sections.""" + # For now, treat as regular text + return self._chunk_text(section) + + def _chunk_text(self, section: dict) -> List[dict]: + """Handle regular text chunking.""" + content = section["content"] + if len(content) <= 
self.max_message_length: + return [{"type": "text", "content": content}] + + # Split at natural break points + chunks = [] + current_chunk = "" + + sentences = content.split(". ") + for sentence in sentences: + test_chunk = current_chunk + sentence + ". " + if len(test_chunk) > self.max_message_length: + if current_chunk: + chunks.append({"type": "text", "content": current_chunk.strip()}) + current_chunk = sentence + ". " + else: + current_chunk = test_chunk + + if current_chunk: + chunks.append({"type": "text", "content": current_chunk.strip()}) + + return chunks + + def _format_file_operations_section(self, section: dict) -> dict: + """Format file operations section.""" + return {"type": "file_operations", "content": section["content"]} + + def _format_chunk(self, chunk: dict) -> List[FormattedMessage]: + """Format individual chunks into FormattedMessage objects.""" + chunk_type = chunk["type"] + content = chunk["content"] + + if chunk_type == "code_block": + # Format code blocks with proper styling + if chunk.get("format") == "split": + title = ( + "📄 **Code (continued)**" + if "continued" in content + else "📄 **Code**" + ) + else: + title = "📄 **Code**" + + text = f"{title}\n\n{content}" + + elif chunk_type == "file_operations": + # Format file operations with icons + text = f"📁 **File Operations**\n\n{content}" + + elif chunk_type == "explanation": + # Regular explanation text + text = content + + else: + # Default text formatting + text = content + + # Split if still too long + return self._split_message(text) + + def _get_contextual_keyboard( + self, context: Optional[dict] + ) -> Optional[InlineKeyboardMarkup]: + """Get context-aware quick action keyboard.""" + if not context: + return self._get_quick_actions_keyboard() + + buttons = [] + + # Add context-specific buttons + if context.get("has_code"): + buttons.append( + [InlineKeyboardButton("💾 Save Code", callback_data="save_code")] + ) + + if context.get("has_file_operations"): + buttons.append( + 
[InlineKeyboardButton("📁 Show Files", callback_data="show_files")] + ) + + if context.get("has_errors"): + buttons.append([InlineKeyboardButton("🔧 Debug", callback_data="debug")]) + + # Add default actions + default_buttons = [ + [InlineKeyboardButton("🔄 Continue", callback_data="continue")], + [InlineKeyboardButton("💡 Explain", callback_data="explain")], + ] + buttons.extend(default_buttons) + + return InlineKeyboardMarkup(buttons) if buttons else None + def _clean_text(self, text: str) -> str: """Clean text for Telegram display.""" # Remove excessive whitespace diff --git a/src/claude/facade.py b/src/claude/facade.py index ad61d1aa..bf14258d 100644 --- a/src/claude/facade.py +++ b/src/claude/facade.py @@ -4,7 +4,7 @@ """ from pathlib import Path -from typing import Any, Callable, Dict, List, Optional +from typing import Any, Callable, Dict, List, Optional, Union import structlog @@ -12,6 +12,7 @@ from .exceptions import ClaudeToolValidationError from .integration import ClaudeProcessManager, ClaudeResponse, StreamUpdate from .monitor import ToolMonitor +from .sdk_integration import ClaudeSDKManager from .session import SessionManager logger = structlog.get_logger() @@ -23,15 +24,29 @@ class ClaudeIntegration: def __init__( self, config: Settings, - process_manager: ClaudeProcessManager, - session_manager: SessionManager, - tool_monitor: ToolMonitor, + process_manager: Optional[ClaudeProcessManager] = None, + sdk_manager: Optional[ClaudeSDKManager] = None, + session_manager: Optional[SessionManager] = None, + tool_monitor: Optional[ToolMonitor] = None, ): """Initialize Claude integration facade.""" self.config = config - self.process_manager = process_manager + + # Initialize both managers for fallback capability + self.sdk_manager = ( + sdk_manager or ClaudeSDKManager(config) if config.use_sdk else None + ) + self.process_manager = process_manager or ClaudeProcessManager(config) + + # Use SDK by default if configured + if config.use_sdk: + self.manager = 
self.sdk_manager + else: + self.manager = self.process_manager + self.session_manager = session_manager self.tool_monitor = tool_monitor + self._sdk_failed_count = 0 # Track SDK failures for adaptive fallback async def run_command( self, @@ -128,7 +143,7 @@ async def stream_handler(update: StreamUpdate): else session.session_id ) - response = await self.process_manager.execute_command( + response = await self._execute_with_fallback( prompt=prompt, working_directory=working_directory, session_id=claude_session_id, @@ -209,6 +224,85 @@ async def stream_handler(update: StreamUpdate): ) raise + async def _execute_with_fallback( + self, + prompt: str, + working_directory: Path, + session_id: Optional[str] = None, + continue_session: bool = False, + stream_callback: Optional[Callable] = None, + ) -> ClaudeResponse: + """Execute command with SDK->subprocess fallback on JSON decode errors.""" + # Try SDK first if configured + if self.config.use_sdk and self.sdk_manager: + try: + logger.debug("Attempting Claude SDK execution") + response = await self.sdk_manager.execute_command( + prompt=prompt, + working_directory=working_directory, + session_id=session_id, + continue_session=continue_session, + stream_callback=stream_callback, + ) + # Reset failure count on success + self._sdk_failed_count = 0 + return response + + except Exception as e: + error_str = str(e) + # Check if this is a JSON decode error that indicates SDK issues + if ( + "Failed to decode JSON" in error_str + or "JSON decode error" in error_str + or "TaskGroup" in error_str + or "ExceptionGroup" in error_str + ): + self._sdk_failed_count += 1 + logger.warning( + "Claude SDK failed with JSON/TaskGroup error, falling back to subprocess", + error=error_str, + failure_count=self._sdk_failed_count, + error_type=type(e).__name__, + ) + + # Use subprocess fallback + try: + logger.info("Executing with subprocess fallback") + response = await self.process_manager.execute_command( + prompt=prompt, + 
working_directory=working_directory, + session_id=session_id, + continue_session=continue_session, + stream_callback=stream_callback, + ) + logger.info("Subprocess fallback succeeded") + return response + + except Exception as fallback_error: + logger.error( + "Both SDK and subprocess failed", + sdk_error=error_str, + subprocess_error=str(fallback_error), + ) + # Re-raise the original SDK error since it was the primary method + raise e + else: + # For non-JSON errors, re-raise immediately + logger.error( + "Claude SDK failed with non-JSON error", error=error_str + ) + raise + else: + # Use subprocess directly if SDK not configured + logger.debug("Using subprocess execution (SDK disabled)") + return await self.process_manager.execute_command( + prompt=prompt, + working_directory=working_directory, + session_id=session_id, + continue_session=continue_session, + stream_callback=stream_callback, + ) + async def continue_session( self, user_id: int, @@ -296,7 +390,7 @@ async def shutdown(self) -> None: logger.info("Shutting down Claude integration") # Kill any active processes - await self.process_manager.kill_all_processes() + await self.manager.kill_all_processes() # Clean up expired sessions await self.cleanup_expired_sessions() diff --git a/src/claude/integration.py b/src/claude/integration.py index 010bc079..ed857f73 100644 --- a/src/claude/integration.py +++ b/src/claude/integration.py @@ -11,6 +11,7 @@ import json import uuid from asyncio.subprocess import Process +from collections import deque from dataclasses import dataclass, field from pathlib import Path from typing import Any, AsyncIterator, Callable, Dict, List, Optional @@ -43,22 +44,64 @@ class ClaudeResponse: @dataclass class StreamUpdate: - """Streaming update from Claude.""" + """Enhanced streaming update from Claude with richer context.""" - type: str # 'assistant', 'user', 'system', 'result' + type: str # 'assistant', 'user', 'system', 'result', 'tool_result', 'error', 'progress' content: 
Optional[str] = None tool_calls: Optional[List[Dict]] = None metadata: Optional[Dict] = None + # Enhanced fields for better tracking + timestamp: Optional[str] = None + session_context: Optional[Dict] = None + progress: Optional[Dict] = None + error_info: Optional[Dict] = None + + # Execution tracking + execution_id: Optional[str] = None + parent_message_id: Optional[str] = None + + def is_error(self) -> bool: + """Check if this update represents an error.""" + return self.type == "error" or ( + self.metadata and self.metadata.get("is_error", False) + ) + + def get_tool_names(self) -> List[str]: + """Extract tool names from tool calls.""" + if not self.tool_calls: + return [] + return [call.get("name") for call in self.tool_calls if call.get("name")] + + def get_progress_percentage(self) -> Optional[int]: + """Get progress percentage if available.""" + if self.progress: + return self.progress.get("percentage") + return None + + def get_error_message(self) -> Optional[str]: + """Get error message if this is an error update.""" + if self.error_info: + return self.error_info.get("message") + elif self.is_error() and self.content: + return self.content + return None + class ClaudeProcessManager: - """Manage Claude Code subprocess execution.""" + """Manage Claude Code subprocess execution with memory optimization.""" def __init__(self, config: Settings): """Initialize process manager with configuration.""" self.config = config self.active_processes: Dict[str, Process] = {} + # Memory optimization settings + self.max_message_buffer = 1000 # Limit message history + self.streaming_buffer_size = ( + 65536 # 64KB streaming buffer for large JSON messages + ) + async def execute_command( self, prompt: str, @@ -185,31 +228,53 @@ async def _start_process(self, cmd: List[str], cwd: Path) -> Process: async def _handle_process_output( self, process: Process, stream_callback: Optional[Callable] ) -> ClaudeResponse: - """Handle streaming output from Claude Code.""" - messages = [] + 
"""Memory-optimized output handling with bounded buffers.""" + message_buffer = deque(maxlen=self.max_message_buffer) result = None + parsing_errors = [] - async for line in self._read_stream(process.stdout): + async for line in self._read_stream_bounded(process.stdout): try: msg = json.loads(line) - messages.append(msg) - # Create stream update + # Enhanced validation + if not self._validate_message_structure(msg): + parsing_errors.append(f"Invalid message structure: {line[:100]}") + continue + + message_buffer.append(msg) + + # Process immediately to avoid memory buildup update = self._parse_stream_message(msg) if update and stream_callback: try: await stream_callback(update) except Exception as e: - logger.warning("Stream callback failed", error=str(e)) + logger.warning( + "Stream callback failed", + error=str(e), + update_type=update.type, + ) # Check for final result if msg.get("type") == "result": result = msg - except json.JSONDecodeError: - logger.warning("Failed to parse JSON line", line=line) + except json.JSONDecodeError as e: + parsing_errors.append(f"JSON decode error: {e}") + logger.warning( + "Failed to parse JSON line", line=line[:200], error=str(e) + ) continue + # Enhanced error reporting + if parsing_errors: + logger.warning( + "Parsing errors encountered", + count=len(parsing_errors), + errors=parsing_errors[:5], + ) + # Wait for process to complete return_code = await process.wait() @@ -259,7 +324,7 @@ async def _handle_process_output( logger.error("No result message received from Claude Code") raise ClaudeParsingError("No result message received from Claude Code") - return self._parse_result(result, messages) + return self._parse_result(result, list(message_buffer)) async def _read_stream(self, stream) -> AsyncIterator[str]: """Read lines from stream.""" @@ -269,44 +334,187 @@ async def _read_stream(self, stream) -> AsyncIterator[str]: break yield line.decode("utf-8", errors="replace").strip() + async def _read_stream_bounded(self, stream) -> 
AsyncIterator[str]: + """Read stream with memory bounds to prevent excessive memory usage.""" + buffer = b"" + + while True: + chunk = await stream.read(self.streaming_buffer_size) + if not chunk: + break + + buffer += chunk + + # Process complete lines + while b"\n" in buffer: + line, buffer = buffer.split(b"\n", 1) + yield line.decode("utf-8", errors="replace").strip() + + # Process remaining buffer + if buffer: + yield buffer.decode("utf-8", errors="replace").strip() + def _parse_stream_message(self, msg: Dict) -> Optional[StreamUpdate]: - """Parse streaming message into update.""" + """Enhanced parsing with comprehensive message type support.""" msg_type = msg.get("type") + # Add support for more message types if msg_type == "assistant": - # Extract content and tool calls - message = msg.get("message", {}) - content_blocks = message.get("content", []) - - # Get text content - text_content = [] - tool_calls = [] - - for block in content_blocks: - if block.get("type") == "text": - text_content.append(block.get("text", "")) - elif block.get("type") == "tool_use": - tool_calls.append( - {"name": block.get("name"), "input": block.get("input", {})} - ) + return self._parse_assistant_message(msg) + elif msg_type == "tool_result": + return self._parse_tool_result_message(msg) + elif msg_type == "user": + return self._parse_user_message(msg) + elif msg_type == "system": + return self._parse_system_message(msg) + elif msg_type == "error": + return self._parse_error_message(msg) + elif msg_type == "progress": + return self._parse_progress_message(msg) + + # Unknown message type - log and continue + logger.debug("Unknown message type", msg_type=msg_type, msg=msg) + return None - return StreamUpdate( - type="assistant", - content="\n".join(text_content) if text_content else None, - tool_calls=tool_calls if tool_calls else None, - ) + def _parse_assistant_message(self, msg: Dict) -> StreamUpdate: + """Parse assistant message with enhanced context.""" + message = 
msg.get("message", {}) + content_blocks = message.get("content", []) + + # Get text content + text_content = [] + tool_calls = [] + + for block in content_blocks: + if block.get("type") == "text": + text_content.append(block.get("text", "")) + elif block.get("type") == "tool_use": + tool_calls.append( + { + "name": block.get("name"), + "input": block.get("input", {}), + "id": block.get("id"), + } + ) + + return StreamUpdate( + type="assistant", + content="\n".join(text_content) if text_content else None, + tool_calls=tool_calls if tool_calls else None, + timestamp=msg.get("timestamp"), + session_context={"session_id": msg.get("session_id")}, + execution_id=msg.get("id"), + ) + + def _parse_tool_result_message(self, msg: Dict) -> StreamUpdate: + """Parse tool execution results.""" + result = msg.get("result", {}) + content = result.get("content") if isinstance(result, dict) else str(result) + + return StreamUpdate( + type="tool_result", + content=content, + metadata={ + "tool_use_id": msg.get("tool_use_id"), + "is_error": ( + result.get("is_error", False) if isinstance(result, dict) else False + ), + "execution_time_ms": ( + result.get("execution_time_ms") + if isinstance(result, dict) + else None + ), + }, + timestamp=msg.get("timestamp"), + session_context={"session_id": msg.get("session_id")}, + error_info={"message": content} if result.get("is_error", False) else None, + ) + + def _parse_user_message(self, msg: Dict) -> StreamUpdate: + """Parse user message.""" + message = msg.get("message", {}) + content = message.get("content", "") + + # Handle both string and block format content + if isinstance(content, list): + text_parts = [] + for block in content: + if isinstance(block, dict) and block.get("type") == "text": + text_parts.append(block.get("text", "")) + elif isinstance(block, str): + text_parts.append(block) + content = "\n".join(text_parts) + + return StreamUpdate( + type="user", + content=content if content else None, + timestamp=msg.get("timestamp"), + 
session_context={"session_id": msg.get("session_id")}, + ) + + def _parse_system_message(self, msg: Dict) -> StreamUpdate: + """Parse system messages including init and other subtypes.""" + subtype = msg.get("subtype") - elif msg_type == "system" and msg.get("subtype") == "init": + if subtype == "init": # Initial system message with available tools return StreamUpdate( type="system", metadata={ + "subtype": "init", "tools": msg.get("tools", []), "mcp_servers": msg.get("mcp_servers", []), + "model": msg.get("model"), + "cwd": msg.get("cwd"), + "permission_mode": msg.get("permissionMode"), }, + session_context={"session_id": msg.get("session_id")}, + ) + else: + # Other system messages + return StreamUpdate( + type="system", + content=msg.get("message", str(msg)), + metadata={"subtype": subtype}, + timestamp=msg.get("timestamp"), + session_context={"session_id": msg.get("session_id")}, ) - return None + def _parse_error_message(self, msg: Dict) -> StreamUpdate: + """Parse error messages.""" + error_message = msg.get("message", msg.get("error", str(msg))) + + return StreamUpdate( + type="error", + content=error_message, + error_info={ + "message": error_message, + "code": msg.get("code"), + "subtype": msg.get("subtype"), + }, + timestamp=msg.get("timestamp"), + session_context={"session_id": msg.get("session_id")}, + ) + + def _parse_progress_message(self, msg: Dict) -> StreamUpdate: + """Parse progress update messages.""" + return StreamUpdate( + type="progress", + content=msg.get("message", msg.get("status")), + progress={ + "percentage": msg.get("percentage"), + "step": msg.get("step"), + "total_steps": msg.get("total_steps"), + "operation": msg.get("operation"), + }, + timestamp=msg.get("timestamp"), + session_context={"session_id": msg.get("session_id")}, + ) + + def _validate_message_structure(self, msg: Dict) -> bool: + """Validate message has required structure.""" + required_fields = ["type"] + return all(field in msg for field in required_fields) def 
_parse_result(self, result: Dict, messages: List[Dict]) -> ClaudeResponse: """Parse final result message.""" diff --git a/src/claude/sdk_integration.py b/src/claude/sdk_integration.py new file mode 100644 index 00000000..9ccf9641 --- /dev/null +++ b/src/claude/sdk_integration.py @@ -0,0 +1,439 @@ +"""Claude Code Python SDK integration. + +Features: +- Native Claude Code SDK integration +- Async streaming support +- Tool execution management +- Session persistence +""" + +import asyncio +import os +import uuid +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any, AsyncIterator, Callable, Dict, List, Optional + +import structlog +from claude_code_sdk import ( + ClaudeCodeOptions, + ClaudeSDKError, + CLIConnectionError, + CLINotFoundError, + Message, + ProcessError, + query, +) +from claude_code_sdk.types import ( + AssistantMessage, + ResultMessage, + TextBlock, + ToolResultBlock, + ToolUseBlock, + UserMessage, +) + +from ..config.settings import Settings +from .exceptions import ( + ClaudeParsingError, + ClaudeProcessError, + ClaudeTimeoutError, +) + +logger = structlog.get_logger() + + +def find_claude_cli(claude_cli_path: Optional[str] = None) -> Optional[str]: + """Find Claude CLI in common locations.""" + import glob + import shutil + + # First check if a specific path was provided via config or env + if claude_cli_path: + if os.path.exists(claude_cli_path) and os.access(claude_cli_path, os.X_OK): + return claude_cli_path + + # Check CLAUDE_CLI_PATH environment variable + env_path = os.environ.get("CLAUDE_CLI_PATH") + if env_path and os.path.exists(env_path) and os.access(env_path, os.X_OK): + return env_path + + # Check if claude is already in PATH + claude_path = shutil.which("claude") + if claude_path: + return claude_path + + # Check common installation locations + common_paths = [ + # NVM installations + os.path.expanduser("~/.nvm/versions/node/*/bin/claude"), + # Direct npm global install + 
os.path.expanduser("~/.npm-global/bin/claude"), + os.path.expanduser("~/node_modules/.bin/claude"), + # System locations + "/usr/local/bin/claude", + "/usr/bin/claude", + # Windows locations (for cross-platform support) + os.path.expanduser("~/AppData/Roaming/npm/claude.cmd"), + ] + + for pattern in common_paths: + matches = glob.glob(pattern) + if matches: + # Return the first match + return matches[0] + + return None + + +def update_path_for_claude(claude_cli_path: Optional[str] = None) -> bool: + """Update PATH to include Claude CLI if found.""" + claude_path = find_claude_cli(claude_cli_path) + + if claude_path: + # Add the directory containing claude to PATH + claude_dir = os.path.dirname(claude_path) + current_path = os.environ.get("PATH", "") + + if claude_dir not in current_path: + os.environ["PATH"] = f"{claude_dir}:{current_path}" + logger.info("Updated PATH for Claude CLI", claude_path=claude_path) + + return True + + return False + + +@dataclass +class ClaudeResponse: + """Response from Claude Code SDK.""" + + content: str + session_id: str + cost: float + duration_ms: int + num_turns: int + is_error: bool = False + error_type: Optional[str] = None + tools_used: List[Dict[str, Any]] = field(default_factory=list) + + +@dataclass +class StreamUpdate: + """Streaming update from Claude SDK.""" + + type: str # 'assistant', 'user', 'system', 'result' + content: Optional[str] = None + tool_calls: Optional[List[Dict]] = None + metadata: Optional[Dict] = None + + +class ClaudeSDKManager: + """Manage Claude Code SDK integration.""" + + def __init__(self, config: Settings): + """Initialize SDK manager with configuration.""" + self.config = config + self.active_sessions: Dict[str, Dict[str, Any]] = {} + + # Try to find and update PATH for Claude CLI + if not update_path_for_claude(config.claude_cli_path): + logger.warning( + "Claude CLI not found in PATH or common locations. " + "SDK may fail if Claude is not installed or not in PATH." 
+ ) + + # Set up environment for Claude Code SDK if API key is provided + # If no API key is provided, the SDK will use existing CLI authentication + if config.anthropic_api_key_str: + os.environ["ANTHROPIC_API_KEY"] = config.anthropic_api_key_str + logger.info("Using provided API key for Claude SDK authentication") + else: + logger.info("No API key provided, using existing Claude CLI authentication") + + async def execute_command( + self, + prompt: str, + working_directory: Path, + session_id: Optional[str] = None, + continue_session: bool = False, + stream_callback: Optional[Callable[[StreamUpdate], None]] = None, + ) -> ClaudeResponse: + """Execute Claude Code command via SDK.""" + start_time = asyncio.get_event_loop().time() + + logger.info( + "Starting Claude SDK command", + working_directory=str(working_directory), + session_id=session_id, + continue_session=continue_session, + ) + + try: + # Build Claude Code options + options = ClaudeCodeOptions( + max_turns=self.config.claude_max_turns, + cwd=str(working_directory), + allowed_tools=self.config.claude_allowed_tools, + ) + + # Collect messages + messages = [] + cost = 0.0 + tools_used = [] + + # Execute with streaming and timeout + await asyncio.wait_for( + self._execute_query_with_streaming( + prompt, options, messages, stream_callback + ), + timeout=self.config.claude_timeout_seconds, + ) + + # Extract cost and tools from result message + cost = 0.0 + tools_used = [] + for message in messages: + if isinstance(message, ResultMessage): + cost = getattr(message, "total_cost_usd", 0.0) or 0.0 + tools_used = self._extract_tools_from_messages(messages) + break + + # Calculate duration + duration_ms = int((asyncio.get_event_loop().time() - start_time) * 1000) + + # Get or create session ID + final_session_id = session_id or str(uuid.uuid4()) + + # Update session + self._update_session(final_session_id, messages) + + return ClaudeResponse( + content=self._extract_content_from_messages(messages), + 
session_id=final_session_id, + cost=cost, + duration_ms=duration_ms, + num_turns=len( + [ + m + for m in messages + if isinstance(m, (UserMessage, AssistantMessage)) + ] + ), + tools_used=tools_used, + ) + + except asyncio.TimeoutError: + logger.error( + "Claude SDK command timed out", + timeout_seconds=self.config.claude_timeout_seconds, + ) + raise ClaudeTimeoutError( + f"Claude SDK timed out after {self.config.claude_timeout_seconds}s" + ) + + except CLINotFoundError as e: + logger.error("Claude CLI not found", error=str(e)) + error_msg = ( + "Claude Code not found. Please ensure Claude is installed:\n" + " npm install -g @anthropic-ai/claude-code\n\n" + "If already installed, try one of these:\n" + " 1. Add Claude to your PATH\n" + " 2. Create a symlink: ln -s $(which claude) /usr/local/bin/claude\n" + " 3. Set CLAUDE_CLI_PATH environment variable" + ) + raise ClaudeProcessError(error_msg) + + except ProcessError as e: + logger.error( + "Claude process failed", + error=str(e), + exit_code=getattr(e, "exit_code", None), + ) + raise ClaudeProcessError(f"Claude process error: {str(e)}") + + except CLIConnectionError as e: + logger.error("Claude connection error", error=str(e)) + raise ClaudeProcessError(f"Failed to connect to Claude: {str(e)}") + + except ClaudeSDKError as e: + logger.error("Claude SDK error", error=str(e)) + raise ClaudeProcessError(f"Claude SDK error: {str(e)}") + + except Exception as e: + # Handle ExceptionGroup from TaskGroup operations (Python 3.11+) + if type(e).__name__ == "ExceptionGroup" or hasattr(e, "exceptions"): + logger.error( + "Task group error in Claude SDK", + error=str(e), + error_type=type(e).__name__, + exception_count=len(getattr(e, "exceptions", [])), + exceptions=[ + str(ex) for ex in getattr(e, "exceptions", [])[:3] + ], # Log first 3 exceptions + ) + # Extract the most relevant exception from the group + exceptions = getattr(e, "exceptions", [e]) + main_exception = exceptions[0] if exceptions else e + raise 
ClaudeProcessError( + f"Claude SDK task error: {str(main_exception)}" + ) + + # Check if it's an ExceptionGroup disguised as a regular exception + elif hasattr(e, "__notes__") and "TaskGroup" in str(e): + logger.error( + "TaskGroup related error in Claude SDK", + error=str(e), + error_type=type(e).__name__, + ) + raise ClaudeProcessError(f"Claude SDK task error: {str(e)}") + + else: + logger.error( + "Unexpected error in Claude SDK", + error=str(e), + error_type=type(e).__name__, + ) + raise ClaudeProcessError(f"Unexpected error: {str(e)}") + + async def _execute_query_with_streaming( + self, prompt: str, options, messages: List, stream_callback: Optional[Callable] + ) -> None: + """Execute query with streaming and collect messages.""" + try: + async for message in query(prompt=prompt, options=options): + messages.append(message) + + # Handle streaming callback + if stream_callback: + try: + await self._handle_stream_message(message, stream_callback) + except Exception as callback_error: + logger.warning( + "Stream callback failed", + error=str(callback_error), + error_type=type(callback_error).__name__, + ) + # Continue processing even if callback fails + + except Exception as e: + # Handle both ExceptionGroups and regular exceptions + if type(e).__name__ == "ExceptionGroup" or hasattr(e, "exceptions"): + logger.error( + "TaskGroup error in streaming execution", + error=str(e), + error_type=type(e).__name__, + ) + else: + logger.error( + "Error in streaming execution", + error=str(e), + error_type=type(e).__name__, + ) + # Re-raise to be handled by the outer try-catch + raise + + async def _handle_stream_message( + self, message: Message, stream_callback: Callable[[StreamUpdate], None] + ) -> None: + """Handle streaming message from claude-code-sdk.""" + try: + if isinstance(message, AssistantMessage): + # Extract content from assistant message + content = getattr(message, "content", []) + if content and isinstance(content, list): + # Extract text from TextBlock 
objects + text_parts = [] + for block in content: + if hasattr(block, "text"): + text_parts.append(block.text) + if text_parts: + update = StreamUpdate( + type="assistant", + content="\n".join(text_parts), + ) + await stream_callback(update) + elif content: + # Fallback for non-list content + update = StreamUpdate( + type="assistant", + content=str(content), + ) + await stream_callback(update) + + # Check for tool calls (if available in the message structure) + # Note: This depends on the actual claude-code-sdk message structure + + elif isinstance(message, UserMessage): + content = getattr(message, "content", "") + if content: + update = StreamUpdate( + type="user", + content=content, + ) + await stream_callback(update) + + except Exception as e: + logger.warning("Stream callback failed", error=str(e)) + + def _extract_content_from_messages(self, messages: List[Message]) -> str: + """Extract content from message list.""" + content_parts = [] + + for message in messages: + if isinstance(message, AssistantMessage): + content = getattr(message, "content", []) + if content and isinstance(content, list): + # Extract text from TextBlock objects + for block in content: + if hasattr(block, "text"): + content_parts.append(block.text) + elif content: + # Fallback for non-list content + content_parts.append(str(content)) + + return "\n".join(content_parts) + + def _extract_tools_from_messages( + self, messages: List[Message] + ) -> List[Dict[str, Any]]: + """Extract tools used from message list.""" + tools_used = [] + current_time = asyncio.get_event_loop().time() + + for message in messages: + if isinstance(message, AssistantMessage): + content = getattr(message, "content", []) + if content and isinstance(content, list): + for block in content: + if isinstance(block, ToolUseBlock): + tools_used.append( + { + "name": getattr(block, "tool_name", "unknown"), + "timestamp": current_time, + "input": getattr(block, "tool_input", {}), + } + ) + + return tools_used + + def 
_update_session(self, session_id: str, messages: List[Message]) -> None: + """Update session data.""" + if session_id not in self.active_sessions: + self.active_sessions[session_id] = { + "messages": [], + "created_at": asyncio.get_event_loop().time(), + } + + session_data = self.active_sessions[session_id] + session_data["messages"] = messages + session_data["last_used"] = asyncio.get_event_loop().time() + + async def kill_all_processes(self) -> None: + """Kill all active processes (no-op for SDK).""" + logger.info("Clearing active SDK sessions", count=len(self.active_sessions)) + self.active_sessions.clear() + + def get_active_process_count(self) -> int: + """Get number of active sessions.""" + return len(self.active_sessions) diff --git a/src/claude/session.py b/src/claude/session.py index 8aafd6e7..2d8921ff 100644 --- a/src/claude/session.py +++ b/src/claude/session.py @@ -11,12 +11,18 @@ from dataclasses import dataclass, field from datetime import datetime, timedelta from pathlib import Path -from typing import Dict, List, Optional +from typing import TYPE_CHECKING, Dict, List, Optional, Union import structlog from ..config.settings import Settings -from .integration import ClaudeResponse + +if TYPE_CHECKING: + from .integration import ClaudeResponse as CLIClaudeResponse + from .sdk_integration import ClaudeResponse as SDKClaudeResponse + +# Union type for both CLI and SDK responses +ClaudeResponse = Union["CLIClaudeResponse", "SDKClaudeResponse"] logger = structlog.get_logger() diff --git a/src/config/settings.py b/src/config/settings.py index afafac78..d549aba5 100644 --- a/src/config/settings.py +++ b/src/config/settings.py @@ -49,7 +49,19 @@ class Settings(BaseSettings): ) # Claude settings - claude_binary_path: str = Field("claude", description="Path to Claude CLI binary") + claude_binary_path: Optional[str] = Field( + None, description="Path to Claude CLI binary (deprecated)" + ) + claude_cli_path: Optional[str] = Field( + None, description="Path to 
Claude CLI executable" + ) + anthropic_api_key: Optional[SecretStr] = Field( + None, + description="Anthropic API key for Claude SDK (optional if logged into Claude CLI)", + ) + claude_model: str = Field( + "claude-3-5-sonnet-20241022", description="Claude model to use" + ) claude_max_turns: int = Field( DEFAULT_CLAUDE_MAX_TURNS, description="Max conversation turns" ) @@ -59,6 +71,7 @@ class Settings(BaseSettings): claude_max_cost_per_user: float = Field( DEFAULT_CLAUDE_MAX_COST_PER_USER, description="Max cost per user" ) + use_sdk: bool = Field(True, description="Use Python SDK instead of CLI subprocess") claude_allowed_tools: Optional[List[str]] = Field( default=[ "Read", @@ -220,3 +233,12 @@ def auth_secret_str(self) -> Optional[str]: if self.auth_token_secret: return self.auth_token_secret.get_secret_value() return None + + @property + def anthropic_api_key_str(self) -> Optional[str]: + """Get Anthropic API key as string.""" + return ( + self.anthropic_api_key.get_secret_value() + if self.anthropic_api_key + else None + ) diff --git a/src/main.py b/src/main.py index 10b9edd9..e06f532f 100644 --- a/src/main.py +++ b/src/main.py @@ -18,6 +18,7 @@ SessionManager, ToolMonitor, ) +from src.claude.sdk_integration import ClaudeSDKManager from src.config.features import FeatureFlags from src.config.loader import load_config from src.config.settings import Settings @@ -127,15 +128,25 @@ async def create_application(config: Settings) -> Dict[str, Any]: audit_logger = AuditLogger(audit_storage) # Create Claude integration components with persistent storage - process_manager = ClaudeProcessManager(config) session_storage = SQLiteSessionStorage(storage.db_manager) session_manager = SessionManager(config, session_storage) tool_monitor = ToolMonitor(config, security_validator) + # Create Claude manager based on configuration + if config.use_sdk: + logger.info("Using Claude Python SDK integration") + sdk_manager = ClaudeSDKManager(config) + process_manager = None + else: + 
logger.info("Using Claude CLI subprocess integration") + process_manager = ClaudeProcessManager(config) + sdk_manager = None + # Create main Claude integration facade claude_integration = ClaudeIntegration( config=config, process_manager=process_manager, + sdk_manager=sdk_manager, session_manager=session_manager, tool_monitor=tool_monitor, ) diff --git a/src/utils/constants.py b/src/utils/constants.py index ef90e2b2..ebf9cd6c 100644 --- a/src/utils/constants.py +++ b/src/utils/constants.py @@ -20,6 +20,9 @@ TELEGRAM_MAX_MESSAGE_LENGTH = 4096 SAFE_MESSAGE_LENGTH = 4000 # Leave room for formatting +# Session limits +MAX_SESSION_LENGTH = 1000 # Maximum messages per session + # File limits MAX_FILE_SIZE_MB = 10 MAX_FILE_SIZE_BYTES = MAX_FILE_SIZE_MB * 1024 * 1024 diff --git a/tests/unit/test_claude/test_sdk_integration.py b/tests/unit/test_claude/test_sdk_integration.py new file mode 100644 index 00000000..962d1b26 --- /dev/null +++ b/tests/unit/test_claude/test_sdk_integration.py @@ -0,0 +1,206 @@ +"""Test Claude SDK integration.""" + +import os +from pathlib import Path +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest +from claude_code_sdk import ClaudeCodeOptions + +from src.claude.sdk_integration import ClaudeResponse, ClaudeSDKManager, StreamUpdate +from src.config.settings import Settings + + +class TestClaudeSDKManager: + """Test Claude SDK manager.""" + + @pytest.fixture + def config(self, tmp_path): + """Create test config without API key.""" + return Settings( + telegram_bot_token="test:token", + telegram_bot_username="testbot", + approved_directory=tmp_path, + use_sdk=True, + claude_timeout_seconds=2, # Short timeout for testing + ) + + @pytest.fixture + def sdk_manager(self, config): + """Create SDK manager.""" + return ClaudeSDKManager(config) + + async def test_sdk_manager_initialization_with_api_key(self, tmp_path): + """Test SDK manager initialization with API key.""" + from src.config.settings import Settings + + # Test with API 
key provided + config_with_key = Settings( + telegram_bot_token="test:token", + telegram_bot_username="testbot", + approved_directory=tmp_path, + anthropic_api_key="test-api-key", + use_sdk=True, + claude_timeout_seconds=2, + ) + + # Store original env var + original_api_key = os.environ.get("ANTHROPIC_API_KEY") + + try: + manager = ClaudeSDKManager(config_with_key) + + # Check that API key was set in environment + assert os.environ.get("ANTHROPIC_API_KEY") == "test-api-key" + assert manager.active_sessions == {} + + finally: + # Restore original env var + if original_api_key: + os.environ["ANTHROPIC_API_KEY"] = original_api_key + elif "ANTHROPIC_API_KEY" in os.environ: + del os.environ["ANTHROPIC_API_KEY"] + + async def test_sdk_manager_initialization_without_api_key(self, config): + """Test SDK manager initialization without API key (uses CLI auth).""" + # Store original env var + original_api_key = os.environ.get("ANTHROPIC_API_KEY") + + try: + # Remove any existing API key + if "ANTHROPIC_API_KEY" in os.environ: + del os.environ["ANTHROPIC_API_KEY"] + + manager = ClaudeSDKManager(config) + + # Check that no API key was set (should use CLI auth) + assert config.anthropic_api_key_str is None + assert manager.active_sessions == {} + + finally: + # Restore original env var + if original_api_key: + os.environ["ANTHROPIC_API_KEY"] = original_api_key + + async def test_execute_command_success(self, sdk_manager): + """Test successful command execution.""" + from claude_code_sdk.types import AssistantMessage, ResultMessage + + # Mock the claude-code-sdk query function + async def mock_query(prompt, options): + yield AssistantMessage(content="Test response") + yield ResultMessage( + subtype="success", + duration_ms=1000, + duration_api_ms=800, + is_error=False, + num_turns=1, + session_id="test-session", + total_cost_usd=0.05, + result="Success", + ) + + with patch("src.claude.sdk_integration.query", side_effect=mock_query): + response = await 
sdk_manager.execute_command( + prompt="Test prompt", + working_directory=Path("/test"), + session_id="test-session", + ) + + # Verify response + assert isinstance(response, ClaudeResponse) + assert response.session_id == "test-session" + assert response.duration_ms >= 0 # Can be 0 in tests + assert not response.is_error + assert response.cost == 0.05 + + async def test_execute_command_with_streaming(self, sdk_manager): + """Test command execution with streaming callback.""" + from claude_code_sdk.types import AssistantMessage, ResultMessage + + stream_updates = [] + + async def stream_callback(update: StreamUpdate): + stream_updates.append(update) + + # Mock the claude-code-sdk query function + async def mock_query(prompt, options): + yield AssistantMessage(content="Test response") + yield ResultMessage( + subtype="success", + duration_ms=1000, + duration_api_ms=800, + is_error=False, + num_turns=1, + session_id="test-session", + total_cost_usd=0.05, + result="Success", + ) + + with patch("src.claude.sdk_integration.query", side_effect=mock_query): + response = await sdk_manager.execute_command( + prompt="Test prompt", + working_directory=Path("/test"), + stream_callback=stream_callback, + ) + + # Verify streaming was called + assert len(stream_updates) > 0 + assert any(update.type == "assistant" for update in stream_updates) + + async def test_execute_command_timeout(self, sdk_manager): + """Test command execution timeout.""" + import asyncio + + # Mock a hanging operation - return async generator that never yields + async def mock_hanging_query(prompt, options): + await asyncio.sleep(5) # This should timeout (config has 2s timeout) + yield # This will never be reached + + from src.claude.exceptions import ClaudeTimeoutError + + with patch("src.claude.sdk_integration.query", side_effect=mock_hanging_query): + with pytest.raises(ClaudeTimeoutError): + await sdk_manager.execute_command( + prompt="Test prompt", + working_directory=Path("/test"), + ) + + async def 
test_session_management(self, sdk_manager): + """Test session management.""" + from claude_code_sdk.types import AssistantMessage + + session_id = "test-session" + messages = [AssistantMessage(content="test")] + + # Update session + sdk_manager._update_session(session_id, messages) + + # Verify session was created + assert session_id in sdk_manager.active_sessions + session_data = sdk_manager.active_sessions[session_id] + assert session_data["messages"] == messages + + async def test_kill_all_processes(self, sdk_manager): + """Test killing all processes (clearing sessions).""" + # Add some active sessions + sdk_manager.active_sessions["session1"] = {"test": "data"} + sdk_manager.active_sessions["session2"] = {"test": "data2"} + + assert len(sdk_manager.active_sessions) == 2 + + # Kill all processes + await sdk_manager.kill_all_processes() + + # Sessions should be cleared + assert len(sdk_manager.active_sessions) == 0 + + def test_get_active_process_count(self, sdk_manager): + """Test getting active process count.""" + assert sdk_manager.get_active_process_count() == 0 + + # Add sessions + sdk_manager.active_sessions["session1"] = {"test": "data"} + sdk_manager.active_sessions["session2"] = {"test": "data2"} + + assert sdk_manager.get_active_process_count() == 2 diff --git a/tests/unit/test_claude/test_session.py b/tests/unit/test_claude/test_session.py index d5617616..6bd6e8c2 100644 --- a/tests/unit/test_claude/test_session.py +++ b/tests/unit/test_claude/test_session.py @@ -5,7 +5,7 @@ import pytest -from src.claude.integration import ClaudeResponse +from src.claude.sdk_integration import ClaudeResponse from src.claude.session import ClaudeSession, InMemorySessionStorage, SessionManager from src.config.settings import Settings