Skip to content

Commit

Permalink
Merge pull request #255 from parea-ai/refactor-log-schema
Browse files Browse the repository at this point in the history
refactor: move log into its own class
  • Loading branch information
joschkabraun authored Dec 4, 2023
2 parents 36acf76 + 78b1110 commit 4b7c5dd
Show file tree
Hide file tree
Showing 10 changed files with 70 additions and 62 deletions.
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -39,12 +39,12 @@ You can define evaluation functions locally or use the ones you have deployed to
If you choose the latter option, the evaluation happens asynchronously and non-blocking.

A fully locally working cookbook can be found [here](parea/cookbook/tracing_and_evaluating_openai_endpoint.py).
Alternatively, you can add the following code to your codebase to get started:
Alternatively, you can add the following code to your codebase to get started:

```python
import os
from parea import init, InMemoryCache
from parea.schemas.models import Log
from parea.schemas.log import Log
from parea.utils.trace_utils import trace

init(api_key=os.getenv("PAREA_API_KEY"), cache=InMemoryCache()) # use InMemoryCache if you don't have a Parea API key
Expand Down
3 changes: 2 additions & 1 deletion parea/cookbook/tracing_with_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,8 @@

from parea import Parea
from parea.helpers import to_date_and_time_string
from parea.schemas.models import Completion, CompletionResponse, FeedbackRequest, LLMInputs, Message, ModelParams, Role
from parea.schemas.log import LLMInputs, Message, ModelParams, Role
from parea.schemas.models import Completion, CompletionResponse, FeedbackRequest
from parea.utils.trace_utils import get_current_trace_id, trace

load_dotenv()
Expand Down
3 changes: 2 additions & 1 deletion parea/cookbook/tracing_without_deployed_prompt.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,8 @@
from dotenv import load_dotenv

from parea import Parea
from parea.schemas.models import Completion, CompletionResponse, FeedbackRequest, LLMInputs, Message, ModelParams
from parea.schemas.log import LLMInputs, Message, ModelParams
from parea.schemas.models import Completion, CompletionResponse, FeedbackRequest
from parea.utils.trace_utils import get_current_trace_id, trace

load_dotenv()
Expand Down
2 changes: 1 addition & 1 deletion parea/evals/chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
import json

from parea.evals.utils import call_openai
from parea.schemas.models import Log
from parea.schemas.log import Log


def goal_success_ratio_factory(use_output: Optional[bool] = False, message_field: Optional[str] = None) -> Callable[[Log], float]:
Expand Down
2 changes: 1 addition & 1 deletion parea/evals/general.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
import re

from parea.evals.utils import call_openai, sent_tokenize
from parea.schemas.models import Log
from parea.schemas.log import Log

one_score_pattern = re.compile("\[\[(\d+\.?\d*)\]\]")
one_score_pattern_backup = re.compile("\[(\d+\.?\d*)\]")
Expand Down
2 changes: 1 addition & 1 deletion parea/evals/rag.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
from collections import Counter

from parea.evals.utils import call_openai, embed, safe_json_loads, sent_tokenize
from parea.schemas.models import Log
from parea.schemas.log import Log


def precision_response_context_factory(context_field: Optional[str] = "context") -> Callable[[Log], float]:
Expand Down
Empty file added parea/schemas/__init__.py
Empty file.
57 changes: 57 additions & 0 deletions parea/schemas/log.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
from typing import Any, Dict, List, Optional, Union

from enum import Enum

from attr import define, field


class Role(str, Enum):
    """Author roles for chat messages.

    Mixes in ``str`` so members compare equal to their string values
    (e.g. ``Role.user == "user"``) and serialize as plain strings.
    """

    user = "user"
    assistant = "assistant"
    system = "system"
    example_user = "example_user"
    example_assistant = "example_assistant"


@define
class Message:
    """A single chat message: free-form text content plus the author's role."""

    content: str
    role: Role = Role.user

    def to_dict(self) -> Dict[str, str]:
        """Serialize to a plain ``{"content": ..., "role": ...}`` dict.

        Uses ``self.role.value`` (e.g. ``"user"``): ``str(self.role)`` on a
        ``(str, Enum)`` member renders as ``"Role.user"`` on Python < 3.11,
        which would corrupt serialized payloads. ``Dict`` (typing) keeps the
        return annotation valid on Python 3.8, consistent with the module's
        other annotations.
        """
        return {
            "content": self.content,
            "role": self.role.value,
        }


@define
class ModelParams:
    """Sampling/decoding parameters for an LLM call.

    NOTE(review): defaults appear to mirror the OpenAI API defaults
    (temperature 1.0, top_p 1.0, penalties 0.0) — confirm against the
    provider integrations before relying on that.
    """

    temp: float = 1.0  # sampling temperature (presumably; name suggests it)
    top_p: float = 1.0  # nucleus-sampling probability mass
    frequency_penalty: float = 0.0
    presence_penalty: float = 0.0
    max_length: Optional[int] = None  # max tokens to generate; None = provider default


@define
class LLMInputs:
    """Full input configuration of an LLM call: model identity, sampling
    parameters, and the prompt content (messages / function definitions)."""

    model: Optional[str] = None
    provider: Optional[str] = None
    model_params: Optional[ModelParams] = None
    messages: Optional[List[Message]] = None
    functions: Optional[List[Any]] = None
    # Dict (typing) instead of the builtin-generic dict[...] keeps this
    # annotation valid on Python 3.8 and consistent with the rest of the
    # module, which imports Dict/List/Optional from typing.
    function_call: Optional[Union[str, Dict[str, str]]] = None


@define
class Log:
    """A record of one LLM interaction: call configuration, inputs/output,
    and latency/token/cost accounting."""

    # field(factory=...) gives every Log its OWN LLMInputs. The original
    # class-level default ``LLMInputs()`` was a single mutable instance
    # shared by all Log objects, so mutating one Log's configuration would
    # silently mutate every other Log's.
    configuration: LLMInputs = field(factory=LLMInputs)
    inputs: Optional[Dict[str, str]] = None
    output: Optional[str] = None
    target: Optional[str] = None  # expected/reference output, if any
    latency: Optional[float] = 0.0  # seconds (presumably — confirm at call sites)
    input_tokens: Optional[int] = 0
    output_tokens: Optional[int] = 0
    total_tokens: Optional[int] = 0
    cost: Optional[float] = 0.0
56 changes: 2 additions & 54 deletions parea/schemas/models.py
Original file line number Diff line number Diff line change
@@ -1,47 +1,8 @@
from typing import Any, Dict, List, Optional, Union

from enum import Enum
from typing import Any, List, Optional

from attrs import define, field, validators


class Role(str, Enum):
user = "user"
assistant = "assistant"
system = "system"
example_user = "example_user"
example_assistant = "example_assistant"


@define
class Message:
content: str
role: Role = Role.user

def to_dict(self) -> dict[str, str]:
return {
"content": self.content,
"role": str(self.role),
}


@define
class ModelParams:
temp: float = 1.0
top_p: float = 1.0
frequency_penalty: float = 0.0
presence_penalty: float = 0.0
max_length: Optional[int] = None


@define
class LLMInputs:
model: Optional[str] = None
provider: Optional[str] = None
model_params: Optional[ModelParams] = None
messages: Optional[List[Message]] = None
functions: Optional[List[Any]] = None
function_call: Optional[Union[str, dict[str, str]]] = None
from parea.schemas.log import LLMInputs, Log


@define
Expand Down Expand Up @@ -121,19 +82,6 @@ class NamedEvaluationScore:
score: float = field(validator=[validators.ge(0), validators.le(1)])


@define
class Log:
configuration: LLMInputs = LLMInputs()
inputs: Optional[Dict[str, str]] = None
output: Optional[str] = None
target: Optional[str] = None
latency: Optional[float] = 0.0
input_tokens: Optional[int] = 0
output_tokens: Optional[int] = 0
total_tokens: Optional[int] = 0
cost: Optional[float] = 0.0


@define
class TraceLog(Log):
trace_id: Optional[str] = field(default=None, validator=validators.instance_of(str))
Expand Down
3 changes: 2 additions & 1 deletion parea/wrapper/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,8 @@ def convert_to_openai_object(**kwargs):


from ..cache.cache import Cache
from ..schemas.models import CacheRequest, LLMInputs, ModelParams, TraceLog
from ..schemas.log import LLMInputs, ModelParams
from ..schemas.models import CacheRequest, TraceLog
from ..utils.trace_utils import trace_data
from .wrapper import Wrapper

Expand Down

0 comments on commit 4b7c5dd

Please sign in to comment.