Commit

cleanup

dr-gareth-roberts committed Nov 28, 2024
1 parent 0600b6e commit b2a492a
Showing 6 changed files with 37 additions and 14 deletions.
5 changes: 3 additions & 2 deletions prompts/sentiment_analysis.py
@@ -1,6 +1,7 @@
 def generate_expert_sentiment_analysis_prompt(text):
     prompt = f"""
-    You are a world-renowned expert in linguistic analysis, psychology, and sentiment detection. Your task is to perform an exhaustive sentiment analysis on the given text, providing unparalleled insight into its emotional tone, underlying attitudes, and implicit biases.
+    You are a world-renowned expert in linguistic analysis, psychology, and sentiment detection.
+    Your task is to perform an exhaustive sentiment analysis on the given text, providing unparalleled insight into its emotional tone, underlying attitudes, and implicit biases.
     Text for analysis: "{text}"
@@ -144,4 +145,4 @@ def simulate_expert_sentiment_analysis_response(prompt):
     This sentiment analysis demonstrates the text's complexity, balancing multiple viewpoints while subtly leaning towards a skeptical, slightly negative overall sentiment.
     The high confidence score reflects the comprehensive analysis process, while acknowledging the potential for alternative interpretations.
-    '''
+    '''
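For orientation, a tiny sketch (not part of the commit) of how these prompt builders are typically consumed; the function name comes from the diff, while the sample text and the truncated print are illustrative and the downstream model call is omitted:

from prompts.sentiment_analysis import generate_expert_sentiment_analysis_prompt

# Illustrative input, not from the repository.
sample = "The product works well, though the support team was slow to respond."
prompt = generate_expert_sentiment_analysis_prompt(sample)

# The resulting prompt string would then be sent to whichever LLM client the project wires up.
print(prompt[:200])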
3 changes: 2 additions & 1 deletion prompts/summarisation.py
@@ -1,6 +1,7 @@
 def generate_expert_summarization_prompt(text, desired_length):
     prompt = f"""
-    You are a world-renowned expert in linguistics, information synthesis, and content distillation. Your task is to create an exceptionally high-quality summary of the given text, capturing its essence with unparalleled precision and clarity.
+    You are a world-renowned expert in linguistics, information synthesis, and content distillation.
+    Your task is to create an exceptionally high-quality summary of the given text, capturing its essence with unparalleled precision and clarity.
     Text to summarize: "{text}"
3 changes: 3 additions & 0 deletions requirements.txt
@@ -63,3 +63,6 @@ boto3~=1.35.71
 prometheus_client~=0.21.0
 botocore~=1.35.71
 sympy~=1.13.1
+
+rouge_score~=0.1.2
+bert-score~=0.3.13
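As a quick sanity check for the new bert-score dependency (used via `from bert_score import score` in src/evaluation.py below), a minimal sketch using the package's published API; the candidate and reference texts are illustrative, not from the repository:

from bert_score import score  # bert-score~=0.3.13, added above

# Illustrative candidate/reference pairs, not from the repository.
candidates = ["The model summarises the document accurately."]
references = ["The model accurately summarizes the document."]

# score() returns precision, recall and F1 as torch tensors.
P, R, F1 = score(candidates, references, lang="en", verbose=False)
print(f"BERTScore F1: {F1.mean().item():.3f}")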
28 changes: 22 additions & 6 deletions src/chain_management.py
@@ -3,6 +3,7 @@
"""
from dataclasses import dataclass
from typing import List, Dict, Any, Callable
from asyncio import sleep

from src.advanced_evaluation import LLMDevEnvironment

@@ -18,19 +19,19 @@ class ChainManager:
     def __init__(self, env: 'LLMDevEnvironment'):
         self.env = env
         self.chains: Dict[str, List[ChainStep]] = {}

     def create_chain(self, name: str, steps: List[ChainStep]):
         """Create a new processing chain."""
         self.chains[name] = steps

     async def execute_chain(self, chain_name: str, input_data: Any) -> Dict[str, Any]:
         """Execute a processing chain."""
         if chain_name not in self.chains:
             raise ValueError(f"Chain {chain_name} not found")

         results = {'input': input_data}
         current_data = input_data

         for step in self.chains[chain_name]:
             try:
                 current_data = await self._execute_step(step, current_data)
@@ -39,11 +40,26 @@ async def execute_chain(self, chain_name: str, input_data: Any) -> Dict[str, Any]:
                 self.env.logger.error(f"Error in chain {chain_name}, step {step.name}: {str(e)}")
                 results['error'] = str(e)
                 break

         return results

     async def _execute_step(self, step: ChainStep, input_data: Any) -> Any:
         """Execute a single step with retry logic."""
         if step.retry_config:
             return await self._retry_execution(step, input_data)
         return await step.function(input_data, **step.config)
+
+    async def _retry_execution(self, step: ChainStep, input_data: Any) -> Any:
+        """Retry execution logic for a step."""
+        retries = step.retry_config.get('retries', 3)
+        delay = step.retry_config.get('delay', 1)
+
+        for attempt in range(retries):
+            try:
+                return await step.function(input_data, **step.config)
+            except Exception as e:
+                if attempt < retries - 1:
+                    self.env.logger.warning(f"Retry {attempt + 1} for step {step.name} due to error: {str(e)}")
+                    await sleep(delay)
+                else:
+                    raise e
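A minimal usage sketch (not part of the commit) showing how a chain with the new per-step retry_config might be driven. The ChainStep field names (name, function, config, retry_config) are inferred from the attribute accesses in this diff; the SimpleNamespace env is a stand-in for LLMDevEnvironment, whose construction is not shown here:

import asyncio
import logging
from types import SimpleNamespace

from src.chain_management import ChainManager, ChainStep

async def summarize(data, **config):
    # Hypothetical step function; any awaitable taking (input_data, **config) fits.
    return data[: config.get("max_chars", 50)]

async def main():
    # Stand-in for LLMDevEnvironment: only .logger is needed by ChainManager here.
    env = SimpleNamespace(logger=logging.getLogger("chain-demo"))
    manager = ChainManager(env)
    manager.create_chain("demo", [
        ChainStep(
            name="summarize",
            function=summarize,
            config={"max_chars": 50},
            retry_config={"retries": 3, "delay": 1},  # picked up by _retry_execution
        ),
    ])
    print(await manager.execute_chain("demo", "some long input text ..."))

asyncio.run(main())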
6 changes: 3 additions & 3 deletions src/evaluation.py
@@ -4,7 +4,7 @@
 from typing import List, Dict, Any
 import numpy as np
 from sklearn.metrics.pairwise import cosine_similarity
-from rouge_score import rouge_scorer
+import rouge_score
 import nltk
 from nltk.translate.bleu_score import sentence_bleu
 from bert_score import score
@@ -40,7 +40,7 @@ async def calculate_metrics(self,

     def _calculate_rouge(self, generated_text: str, reference_text: str) -> Dict[str, float]:
         """Calculate ROUGE scores."""
-        scorer = rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rougeL'], use_stemmer=True)
+        scorer = rouge_score.rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rougeL'], use_stemmer=True)
         scores = scorer.score(reference_text, generated_text)
         return {key: value.fmeasure for key, value in scores.items()}

@@ -73,4 +73,4 @@ async def _calculate_coherence(self, text: str) -> float:
             )[0][0]
             coherence_scores.append(similarity)

-        return np.mean(coherence_scores)
+        return np.mean(coherence_scores)
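For reference, a minimal stand-alone check of the rouge-score API touched above; the texts are illustrative, and the sketch follows the package's documented import style, which imports the rouge_scorer submodule explicitly:

from rouge_score import rouge_scorer  # documented import style for the rouge-score package

# Illustrative texts, not from the repository.
reference = "The cat sat on the mat."
generated = "A cat was sitting on the mat."

scorer = rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rougeL'], use_stemmer=True)
scores = scorer.score(reference, generated)  # score(target, prediction)
print({key: value.fmeasure for key, value in scores.items()})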
6 changes: 4 additions & 2 deletions src/task_orchestration.py
@@ -8,6 +8,8 @@
 import networkx as nx
 from pydantic import BaseModel

+from src.agent_system import AgentRole, AgentTeam
+

 class TaskDefinition(BaseModel):
     name: str
@@ -26,7 +28,7 @@ class TaskResult(BaseModel):
     error: Optional[str]

 class TaskOrchestrator:
-    def __init__(self, env: 'LLMDevEnvironment'):
+    def __init__(self, env: "LLM-Dev"):
         self.env = env
         self.task_definitions: Dict[str, TaskDefinition] = {}
         self.task_results: Dict[str, TaskResult] = {}
@@ -150,4 +152,4 @@ async def _run_task(self,
             team.add_agent(AgentRole(role))

         # Execute task with agent team
-        return await team.execute_task(task_input)
+        return await team.execute_task(task_input)
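A speculative sketch of the AgentTeam flow this hunk relies on. Only add_agent(AgentRole(...)) and an awaited execute_task(...) appear in the diff; the zero-argument AgentTeam constructor, the role names, and the task-input shape are all assumptions, flagged inline:

import asyncio
from src.agent_system import AgentRole, AgentTeam  # import added by this commit

async def run_demo():
    team = AgentTeam()  # assumed zero-arg constructor; not shown in the diff
    for role in ["researcher", "writer"]:  # hypothetical role names
        team.add_agent(AgentRole(role))
    # execute_task is awaited with the task input, mirroring _run_task above.
    result = await team.execute_task({"prompt": "Summarise the findings."})
    print(result)

asyncio.run(run_demo())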
