
Merge pull request #32 from parea-ai/feat-log-endpoint
Feat log endpoint
joschkabraun committed Aug 21, 2023
2 parents 4cceabe + 4644ffa commit 99c4133
Showing 3 changed files with 116 additions and 3 deletions.
68 changes: 67 additions & 1 deletion README.md
@@ -38,7 +38,7 @@ from parea.schemas.models import Completion, UseDeployedPrompt, CompletionResponse

load_dotenv()

p = Parea(api_key=os.getenv("API_KEY"))
p = Parea(api_key=os.getenv("PAREA_API_KEY"))

# You will find this deployment_id in the Parea dashboard
deployment_id = '<DEPLOYMENT_ID>'
@@ -78,6 +78,72 @@ async def main_async():
    print(deployed_prompt)
```


### Logging results from LLM providers

```python
import os

from dotenv import load_dotenv

import openai
from parea import Parea
from parea.schemas.models import LogRequest

load_dotenv()

openai.api_key = os.getenv("OPENAI_API_KEY")
p = Parea(api_key=os.getenv("PAREA_API_KEY"))


# define your OpenAI call as you would normally
x = "Golang"
y = "Fiber"
inputs = {"x": x, "y": y}
messages = [
{"role": "user", "content": f"Write a hello world program using {x} and the {y} framework."},
]
model = "gpt-3.5-turbo"
model_params = {
"temperature": 0.7,
"top_p": 1.0,
}
completion = openai.ChatCompletion.create(
    model=model,
    messages=messages,
    **model_params
)
output = completion.choices[0].message['content']

# the LogRequest schema
log_request: LogRequest = LogRequest(
status="success",
name='Test Log',
llm_inputs={
"x": x,
"y": y,
},
llm_configuration={
'model': model,
'messages': messages,
'model_params': model_params,
},
output=output,
input_tokens=completion.usage['prompt_tokens'],
output_tokens=completion.usage['completion_tokens'],
total_tokens=completion.usage['total_tokens'],
)


def main():
    p.log(data=log_request)


async def main_async():
    await p.alog(data=log_request)
```
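
The example defines `main` and `main_async` but does not invoke them. A minimal runner, not part of this commit and shown only for illustration, could be appended to the same script:

```python
# Hypothetical runner for the README example above (not part of the commit).
import asyncio

if __name__ == "__main__":
    main()                     # synchronous path: Parea.log posts the LogRequest
    asyncio.run(main_async())  # async path: Parea.alog via the async HTTP client
```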


### Open source community features

Ready-to-use [Pull Request templates](https://github.com/parea-ai/parea-sdk/blob/master/.github/PULL_REQUEST_TEMPLATE.md)
17 changes: 16 additions & 1 deletion parea/client.py
@@ -3,11 +3,12 @@
from attrs import asdict, define, field

from parea.api_client import HTTPClient
-from parea.schemas.models import Completion, CompletionResponse, FeedbackRequest, UseDeployedPrompt, UseDeployedPromptResponse
+from parea.schemas.models import Completion, CompletionResponse, FeedbackRequest, LogRequest, UseDeployedPrompt, UseDeployedPromptResponse

COMPLETION_ENDPOINT = "/completion"
DEPLOYED_PROMPT_ENDPOINT = "/deployed-prompt"
RECORD_FEEDBACK_ENDPOINT = "/feedback"
LOG_ENDPOINT = "/log"


@define
@@ -64,6 +65,20 @@ async def arecord_feedback(self, data: FeedbackRequest) -> None:
            data=asdict(data),
        )

    def log(self, data: LogRequest) -> None:
        self._client.request(
            "POST",
            LOG_ENDPOINT,
            data=asdict(data),
        )

    async def alog(self, data: LogRequest) -> None:
        await self._client.request_async(
            "POST",
            LOG_ENDPOINT,
            data=asdict(data),
        )


def gen_trace_id() -> str:
"""Generate a unique trace id for each chain of requests"""
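For orientation, here is a minimal sketch (not from the commit) of calling the two new client methods directly; it assumes `PAREA_API_KEY` is set and uses only fields defined on the `LogRequest` schema below:

```python
# Sketch only: exercising the new log/alog methods added to the Parea client.
import asyncio
import os

from parea import Parea
from parea.schemas.models import LogRequest

p = Parea(api_key=os.getenv("PAREA_API_KEY"))
log_request = LogRequest(status="success", name="manual-log", output="hello world")

p.log(data=log_request)                # synchronous POST to the new /log endpoint
asyncio.run(p.alog(data=log_request))  # async variant, sent via request_async
```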
34 changes: 33 additions & 1 deletion parea/schemas/models.py
@@ -21,7 +21,6 @@ class Message:

@define
class ModelParams:
-    model: str
    temp: float = 0.5
    top_p: float = 1.0
    frequency_penalty: float = 0.0
@@ -108,3 +107,36 @@ class FeedbackRequest:
    inference_id: Optional[str] = None
    name: Optional[str] = None
    target: Optional[str] = None


@define
class LogRequest:
    inference_id: Optional[str] = None
    trace_id: Optional[str] = None
    trace_name: Optional[str] = None
    end_user_identifier: Optional[str] = None
    error: Optional[str] = None
    status: Optional[str] = None

    start_timestamp: str = ""
    end_timestamp: str = ""
    duration: float = 0.0

    deployment_id: Optional[str] = None
    name: Optional[str] = None
    evaluation_metrics_ids: Optional[list[int]] = None
    metadata: Optional[dict] = None
    cache_hit: bool = False
    target: Optional[str] = None
    tags: Optional[list[str]] = None

    llm_inputs: Optional[dict[str, Any]] = None
    llm_configuration: LLMInputs = LLMInputs()

    output: Optional[str] = ""
    latency: Optional[float] = 0.0
    input_tokens: Optional[int] = 0
    output_tokens: Optional[int] = 0
    total_tokens: Optional[int] = 0
    cost: Optional[float] = None
    feedback_score: Optional[float] = None
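
Since the schema is an attrs class, `attrs.asdict` turns it into the plain dict that `Parea.log` posts to `/log`. A rough sketch (not part of the commit):

```python
# Sketch only: building a LogRequest and serializing it the way Parea.log
# does internally (attrs.asdict -> payload for the /log endpoint).
from attrs import asdict

from parea.schemas.models import LogRequest

log = LogRequest(
    status="success",
    name="example-log",
    llm_inputs={"x": "Golang", "y": "Fiber"},
    output="hello world",
    input_tokens=10,
    output_tokens=20,
    total_tokens=30,
)
payload = asdict(log)  # nested attrs classes such as llm_configuration become dicts too
print(payload["name"], payload["total_tokens"])
```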
