diff --git a/prompts/sentiment_analysis.py b/prompts/sentiment_analysis.py
index 6f745e2..b780712 100644
--- a/prompts/sentiment_analysis.py
+++ b/prompts/sentiment_analysis.py
@@ -1,6 +1,7 @@
 def generate_expert_sentiment_analysis_prompt(text):
     prompt = f"""
-    You are a world-renowned expert in linguistic analysis, psychology, and sentiment detection. Your task is to perform an exhaustive sentiment analysis on the given text, providing unparalleled insight into its emotional tone, underlying attitudes, and implicit biases.
+    You are a world-renowned expert in linguistic analysis, psychology, and sentiment detection.
+    Your task is to perform an exhaustive sentiment analysis on the given text, providing unparalleled insight into its emotional tone, underlying attitudes, and implicit biases.
 
     Text for analysis: "{text}"
 
@@ -144,4 +145,4 @@ def simulate_expert_sentiment_analysis_response(prompt):
     This sentiment analysis demonstrates the text's complexity, balancing multiple viewpoints while subtly leaning towards a skeptical, slightly negative overall sentiment. The high confidence score reflects the comprehensive analysis process, while acknowledging the potential for alternative interpretations.
-'''
\ No newline at end of file
+'''
diff --git a/prompts/summarisation.py b/prompts/summarisation.py
index e5e03e0..88e54ab 100644
--- a/prompts/summarisation.py
+++ b/prompts/summarisation.py
@@ -1,6 +1,7 @@
 def generate_expert_summarization_prompt(text, desired_length):
     prompt = f"""
-    You are a world-renowned expert in linguistics, information synthesis, and content distillation. Your task is to create an exceptionally high-quality summary of the given text, capturing its essence with unparalleled precision and clarity.
+    You are a world-renowned expert in linguistics, information synthesis, and content distillation.
+    Your task is to create an exceptionally high-quality summary of the given text, capturing its essence with unparalleled precision and clarity.
 
     Text to summarize: "{text}"
diff --git a/requirements.txt b/requirements.txt
index f508ddf..dfd4b65 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -63,3 +63,6 @@ boto3~=1.35.71
 prometheus_client~=0.21.0
 botocore~=1.35.71
 sympy~=1.13.1
+
+rouge_score~=0.1.2
+bert-score~=0.3.13
diff --git a/src/chain_management.py b/src/chain_management.py
index 8e106e7..5707ab1 100644
--- a/src/chain_management.py
+++ b/src/chain_management.py
@@ -3,6 +3,7 @@
 """
 from dataclasses import dataclass
 from typing import List, Dict, Any, Callable
+from asyncio import sleep
 
 from src.advanced_evaluation import LLMDevEnvironment
 
@@ -18,19 +19,19 @@ class ChainManager:
     def __init__(self, env: 'LLMDevEnvironment'):
         self.env = env
         self.chains: Dict[str, List[ChainStep]] = {}
-    
+
     def create_chain(self, name: str, steps: List[ChainStep]):
         """Create a new processing chain."""
         self.chains[name] = steps
-    
+
     async def execute_chain(self, chain_name: str, input_data: Any) -> Dict[str, Any]:
         """Execute a processing chain."""
         if chain_name not in self.chains:
             raise ValueError(f"Chain {chain_name} not found")
-    
+
         results = {'input': input_data}
         current_data = input_data
-    
+
         for step in self.chains[chain_name]:
             try:
                 current_data = await self._execute_step(step, current_data)
@@ -39,11 +40,26 @@ async def execute_chain(self, chain_name: str, input_data: Any) -> Dict[str, Any
                 self.env.logger.error(f"Error in chain {chain_name}, step {step.name}: {str(e)}")
                 results['error'] = str(e)
                 break
-    
+
         return results
-    
+
     async def _execute_step(self, step: ChainStep, input_data: Any) -> Any:
         """Execute a single step with retry logic."""
         if step.retry_config:
             return await self._retry_execution(step, input_data)
         return await step.function(input_data, **step.config)
+
+    async def _retry_execution(self, step: ChainStep, input_data: Any) -> Any:
+        """Retry execution logic for a step."""
+        retries = step.retry_config.get('retries', 3)
+        delay = step.retry_config.get('delay', 1)
+
+        for attempt in range(retries):
+            try:
+                return await step.function(input_data, **step.config)
+            except Exception as e:
+                if attempt < retries - 1:
+                    self.env.logger.warning(f"Retry {attempt + 1} for step {step.name} due to error: {str(e)}")
+                    await sleep(delay)
+                else:
+                    raise e
diff --git a/src/evaluation.py b/src/evaluation.py
index 6e08040..dec2a19 100644
--- a/src/evaluation.py
+++ b/src/evaluation.py
@@ -4,7 +4,7 @@
 from typing import List, Dict, Any
 import numpy as np
 from sklearn.metrics.pairwise import cosine_similarity
-from rouge_score import rouge_scorer
+import rouge_score.rouge_scorer
 import nltk
 from nltk.translate.bleu_score import sentence_bleu
 from bert_score import score
@@ -40,7 +40,7 @@ async def calculate_metrics(self,
 
     def _calculate_rouge(self, generated_text: str, reference_text: str) -> Dict[str, float]:
         """Calculate ROUGE scores."""
-        scorer = rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rougeL'], use_stemmer=True)
+        scorer = rouge_score.rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rougeL'], use_stemmer=True)
         scores = scorer.score(reference_text, generated_text)
         return {key: value.fmeasure for key, value in scores.items()}
 
@@ -73,4 +73,4 @@ async def _calculate_coherence(self, text: str) -> float:
             )[0][0]
             coherence_scores.append(similarity)
 
-        return np.mean(coherence_scores)
\ No newline at end of file
+        return np.mean(coherence_scores)
diff --git a/src/task_orchestration.py b/src/task_orchestration.py
index 7f0669f..7d8c00d 100644
--- a/src/task_orchestration.py
+++ b/src/task_orchestration.py
@@ -8,6 +8,8 @@
 import networkx as nx
 from pydantic import BaseModel
+from src.agent_system import AgentRole, AgentTeam
+
 
 class TaskDefinition(BaseModel):
     name: str
@@ -26,7 +28,7 @@ class TaskResult(BaseModel):
     error: Optional[str]
 
 class TaskOrchestrator:
-    def __init__(self, env: 'LLMDevEnvironment'):
+    def __init__(self, env: "LLMDevEnvironment"):
         self.env = env
         self.task_definitions: Dict[str, TaskDefinition] = {}
         self.task_results: Dict[str, TaskResult] = {}
@@ -150,4 +152,4 @@ async def _run_task(self,
         team.add_agent(AgentRole(role))
 
         # Execute task with agent team
-        return await team.execute_task(task_input)
\ No newline at end of file
+        return await team.execute_task(task_input)
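
Note (not part of the patch): a minimal usage sketch of the retry path added to ChainManager above. It assumes the ChainStep dataclass exposes the name, function, config, and retry_config fields inferred from _retry_execution, and it substitutes a hypothetical StubEnv for LLMDevEnvironment since only .logger is needed here; uppercase_step is likewise an illustrative placeholder, not part of the repo.

# Illustrative sketch only -- assumes the src.chain_management layout shown in the patch.
import asyncio
import logging

from src.chain_management import ChainManager, ChainStep


class StubEnv:
    """Hypothetical stand-in for LLMDevEnvironment; ChainManager only uses .logger here."""
    logger = logging.getLogger("chain-demo")


async def uppercase_step(data, **config):
    # Stand-in for a model call; a real step might raise transient errors
    # that the retry_config below is meant to absorb.
    return data.upper()


async def main():
    manager = ChainManager(StubEnv())
    manager.create_chain(
        "demo",
        [
            ChainStep(
                name="uppercase",
                function=uppercase_step,
                config={},
                # Read by _retry_execution: up to 3 attempts, 1 second apart.
                retry_config={"retries": 3, "delay": 1},
            )
        ],
    )
    results = await manager.execute_chain("demo", "hello")
    print(results)  # per-step result keys depend on code outside the hunks shown above


asyncio.run(main())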