Merge pull request #45 from parea-ai/PAI-237-one-line-tracing-of-oai-calls-via-sdk

feat: add one line tracing for OAI
joschkabraun committed Aug 29, 2023
2 parents 2e80fe0 + 6be57f7 commit 8739aa4
Showing 11 changed files with 871 additions and 76 deletions.
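
The net effect of this PR is that importing the Parea SDK now also imports parea.wrapper, which initializes OpenAIWrapper with default_logger and thereby hooks OpenAI chat completion calls. A minimal sketch of what that one-line tracing looks like from the caller's side, based on the new cookbook file further down (the prompt text here is illustrative, and the wrapper's internals are not shown in this diff):

import os

import openai

from parea import Parea  # importing parea also imports parea.wrapper, which sets up the OpenAI tracing hook

openai.api_key = os.getenv("OPENAI_API_KEY")
p = Parea(api_key=os.getenv("PAREA_API_KEY"))  # Parea client, constructed as in the cookbook file below

# A plain OpenAI call; with the wrapper in place it is traced without any @trace decorator.
response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo-0613",
    messages=[{"role": "user", "content": "Give a one-sentence argument for tracing LLM calls."}],
    temperature=0.0,
)
print(response.choices[0].message["content"])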
1 change: 1 addition & 0 deletions parea/__init__.py
@@ -11,6 +11,7 @@

from importlib import metadata as importlib_metadata

import parea.wrapper # noqa: F401
from parea.client import Parea


134 changes: 67 additions & 67 deletions parea/cookbook/tracing_with_Parea_sdk.ipynb
@@ -26,8 +26,8 @@
"\n",
"## Prerequisites\n",
"\n",
"First, install the Parea-ai SDK package. If you have an account with Parea, your LLM API Keys will be automatically used, so you won't need to redefine them here. \n",
"All you need is your Parea API key. Follow the instructions in the [docs](https://docs.parea.ai/api-reference/authentication) to get your api keys. "
"First, install the Parea-ai SDK package. If you have an account with Parea, your LLM API Keys will be automatically used, so you won't need to redefine them here.\n",
"All you need is your Parea API key. Follow the instructions in the [docs](https://docs.parea.ai/api-reference/authentication) to get your api keys."
]
},
{
@@ -113,50 +113,50 @@
"# We pass the deployment_id and the required inputs to the completion function along with the trace_id\n",
"@trace\n",
"def argument_generator(query: str, additional_description: str = \"\") -> str:\n",
" return p.completion(\n",
" Completion(\n",
" deployment_id=\"p-Ar-Oi14-nBxHUiradyql9\",\n",
" llm_inputs={\n",
" \"additional_description\": additional_description,\n",
" \"date\": f\"{datetime.now()}\",\n",
" \"query\": query,\n",
" },\n",
" )\n",
" ).content\n",
" return p.completion(\n",
" Completion(\n",
" deployment_id=\"p-Ar-Oi14-nBxHUiradyql9\",\n",
" llm_inputs={\n",
" \"additional_description\": additional_description,\n",
" \"date\": f\"{datetime.now()}\",\n",
" \"query\": query,\n",
" },\n",
" )\n",
" ).content\n",
"\n",
"\n",
"@trace\n",
"def critic(argument: str) -> str:\n",
" return p.completion(\n",
" Completion(\n",
" deployment_id=\"p-W2yPy93tAczYrxkipjli6\",\n",
" llm_inputs={\"argument\": argument},\n",
" )\n",
" ).content\n",
" return p.completion(\n",
" Completion(\n",
" deployment_id=\"p-W2yPy93tAczYrxkipjli6\",\n",
" llm_inputs={\"argument\": argument},\n",
" )\n",
" ).content\n",
"\n",
"\n",
"@trace\n",
"def refiner(query: str, additional_description: str, current_arg: str, criticism: str) -> str:\n",
" return p.completion(\n",
" Completion(\n",
" deployment_id=\"p-8Er1Xo0GDGF2xtpmMOpbn\",\n",
" llm_inputs={\n",
" \"additional_description\": additional_description,\n",
" \"date\": f\"{datetime.now()}\",\n",
" \"query\": query,\n",
" \"current_arg\": current_arg,\n",
" \"criticism\": criticism,\n",
" },\n",
" )\n",
" ).content\n",
" return p.completion(\n",
" Completion(\n",
" deployment_id=\"p-8Er1Xo0GDGF2xtpmMOpbn\",\n",
" llm_inputs={\n",
" \"additional_description\": additional_description,\n",
" \"date\": f\"{datetime.now()}\",\n",
" \"query\": query,\n",
" \"current_arg\": current_arg,\n",
" \"criticism\": criticism,\n",
" },\n",
" )\n",
" ).content\n",
"\n",
"\n",
"# This is the parent function which orchestrates the chaining. We'll define our trace_id and trace_name here\n",
"@trace\n",
"def argument_chain(query: str, additional_description: str = \"\") -> str:\n",
" argument = argument_generator(query, additional_description)\n",
" criticism = critic(argument)\n",
" return refiner(query, additional_description, argument, criticism)"
" argument = argument_generator(query, additional_description)\n",
" criticism = critic(argument)\n",
" return refiner(query, additional_description, argument, criticism)"
]
},
{
@@ -184,8 +184,8 @@
"outputs": [],
"source": [
"result = argument_chain(\n",
" \"Whether moonshine is good for you.\",\n",
" additional_description=\"Provide a concise, few sentence argument on why moonshine is good for you.\",\n",
" \"Whether moonshine is good for you.\",\n",
" additional_description=\"Provide a concise, few sentence argument on why moonshine is good for you.\",\n",
")\n",
"print(result)"
]
@@ -222,10 +222,10 @@
"\n",
"@trace\n",
"def argument_chain2(query: str, additional_description: str = \"\") -> tuple[str, str]:\n",
" trace_id = get_current_trace_id()\n",
" argument = argument_generator(query, additional_description)\n",
" criticism = critic(argument)\n",
" return refiner(query, additional_description, argument, criticism), trace_id"
" trace_id = get_current_trace_id()\n",
" argument = argument_generator(query, additional_description)\n",
" criticism = critic(argument)\n",
" return refiner(query, additional_description, argument, criticism), trace_id"
]
},
{
@@ -239,8 +239,8 @@
"outputs": [],
"source": [
"result, trace_id = argument_chain2(\n",
" \"Whether moonshine is good for you.\",\n",
" additional_description=\"Provide a concise, few sentence argument on why moonshine is good for you.\",\n",
" \"Whether moonshine is good for you.\",\n",
" additional_description=\"Provide a concise, few sentence argument on why moonshine is good for you.\",\n",
")\n",
"print(result)"
]
@@ -292,7 +292,7 @@
"One way to make your application traces more useful or actionable is to tag or add metadata to the logs. The completion function accepts additional properties such as:\n",
"\n",
"- tags: List[str]\n",
"- metadata: Dict[str, str] - arbitrary key-value metadata \n",
"- metadata: Dict[str, str] - arbitrary key-value metadata\n",
"- target: str - a gold standard/expected output\n",
"- end_user_identifier: str - unique identifier for your end user\n",
"\n",
@@ -308,18 +308,18 @@
"# let's return the full CompletionResponse to see what other information is returned\n",
"@trace\n",
"def refiner2(query: str, additional_description: str, current_arg: str, criticism: str) -> CompletionResponse:\n",
" return p.completion(\n",
" Completion(\n",
" deployment_id=\"p-8Er1Xo0GDGF2xtpmMOpbn\",\n",
" llm_inputs={\n",
" \"additional_description\": additional_description,\n",
" \"date\": f\"{datetime.now()}\",\n",
" \"query\": query,\n",
" \"current_arg\": current_arg,\n",
" \"criticism\": criticism,\n",
" },\n",
" )\n",
" )"
" return p.completion(\n",
" Completion(\n",
" deployment_id=\"p-8Er1Xo0GDGF2xtpmMOpbn\",\n",
" llm_inputs={\n",
" \"additional_description\": additional_description,\n",
" \"date\": f\"{datetime.now()}\",\n",
" \"query\": query,\n",
" \"current_arg\": current_arg,\n",
" \"criticism\": criticism,\n",
" },\n",
" )\n",
" )"
],
"metadata": {
"id": "cXUHZpZbegIn",
@@ -348,13 +348,13 @@
"source": [
"# you can also add metadata and tags via the decorator\n",
"@trace(\n",
" tags=[\"cookbook-example-deployed\", \"feedback_tracked-deployed\"],\n",
" metadata={\"source\": \"python-sdk\", \"deployed\": True},\n",
" tags=[\"cookbook-example-deployed\", \"feedback_tracked-deployed\"],\n",
" metadata={\"source\": \"python-sdk\", \"deployed\": True},\n",
")\n",
"def argument_chain3(query: str, additional_description: str = \"\") -> CompletionResponse:\n",
" argument = argument_generator(query, additional_description)\n",
" criticism = critic(argument)\n",
" return refiner2(query, additional_description, argument, criticism)"
" argument = argument_generator(query, additional_description)\n",
" criticism = critic(argument)\n",
" return refiner2(query, additional_description, argument, criticism)"
]
},
{
@@ -363,16 +363,16 @@
"import json, attrs\n",
"\n",
"result = argument_chain3(\n",
" \"Whether moonshine is good for you.\",\n",
" additional_description=\"Provide a concise, few sentence argument on why sunshine is good for you.\",\n",
" \"Whether moonshine is good for you.\",\n",
" additional_description=\"Provide a concise, few sentence argument on why sunshine is good for you.\",\n",
")\n",
"\n",
"p.record_feedback(\n",
" FeedbackRequest(\n",
" trace_id=result.trace_id,\n",
" score=0.5,\n",
" target=\"Moonshine is nice. Full stop.\",\n",
" )\n",
" FeedbackRequest(\n",
" trace_id=result.trace_id,\n",
" score=0.5,\n",
" target=\"Moonshine is nice. Full stop.\",\n",
" )\n",
")\n",
"print(json.dumps(attrs.asdict(result), indent=4))"
],
@@ -413,7 +413,7 @@
"## Recap\n",
"You made an example LLM application in this walkthrough and instrumented it using Parea's SDK.\n",
"\n",
"You also added tags and metadata and even logged feedback to the logs. The SDK integrates wonderfully with your deployed prompts on Parea, keeping your code flexible and lightweight. Now you can iterate, debug, and monitor your application with ease."
"You also added tags and metadata and even logged feedback to the logs. The SDK integrates wonderfully with your deployed prompts on Parea, keeping your code flexible and lightweight. Now you can iterate, debug, and monitor your application with ease.\n"
]
}
],
105 changes: 104 additions & 1 deletion parea/cookbook/tracing_with_open_ai_endpoint_directly.py
@@ -1 +1,104 @@
# TBU
import os
from datetime import datetime

import openai
from dotenv import load_dotenv

from parea import Parea

load_dotenv()

openai.api_key = os.getenv("OPENAI_API_KEY")

p = Parea(api_key=os.getenv("PAREA_API_KEY"))


def argument_generator(query: str, additional_description: str = "", date=datetime.now()) -> str:
return (
openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
{
"role": "system",
"content": f"""You are a debater making an argument on a topic.
{additional_description}.
The current time is {date}""",
},
{"role": "user", "content": f"""The discussion topic is {query}"""},
],
temperature=0.0,
)
.choices[0]
.message["content"]
)


def critic(argument: str) -> str:
return (
openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
{
"role": "system",
"content": f"""You are a critic.
What unresolved questions or criticism do you have after reading the following argument?
Provide a concise summary of your feedback.""",
},
{"role": "user", "content": f"""{argument}"""},
],
temperature=0.0,
)
.choices[0]
.message["content"]
)


def refiner(query: str, additional_description: str, current_arg: str, criticism: str, date=datetime.now()) -> str:
return (
openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
{
"role": "system",
"content": f"""You are a debater making an argument on a topic.
{additional_description}.
The current time is {date}""",
},
{"role": "user", "content": f"""The discussion topic is {query}"""},
{"role": "assistant", "content": f"""{current_arg}"""},
{"role": "user", "content": f"""{criticism}"""},
{"role": "system", "content": f"""Please generate a new argument that incorporates the feedback from the user."""},
],
temperature=0.0,
)
.choices[0]
.message["content"]
)


def argument_chain(query: str, additional_description: str = "") -> str:
argument = argument_generator(query, additional_description)
criticism = critic(argument)
return refiner(query, additional_description, argument, criticism)


if __name__ == "__main__":
result = argument_chain(
"Whether caffeine is good for you.",
additional_description="Provide a concise, few sentence argument on why caffeine is good for you.",
)
print(result)

from parea.schemas.models import FeedbackRequest
from parea.utils.trace_utils import get_current_trace_id

p = Parea(api_key=os.getenv("PAREA_API_KEY"))

trace_id = get_current_trace_id()
print(f"trace_id: {trace_id}")
p.record_feedback(
FeedbackRequest(
trace_id=trace_id,
score=0.7, # 0.0 (bad) to 1.0 (good)
)
)
2 changes: 1 addition & 1 deletion parea/schemas/models.py
@@ -21,7 +21,7 @@ class Message:

@define
class ModelParams:
temp: float = 0.5
temp: float = 1.0
top_p: float = 1.0
frequency_penalty: float = 0.0
presence_penalty: float = 0.0
14 changes: 9 additions & 5 deletions parea/utils/trace_utils.py
@@ -84,11 +84,7 @@ def init_trace(func_name, args, kwargs, func):
def cleanup_trace(trace_id):
end_time = time.time()
trace_data.get()[trace_id].end_timestamp = to_date_and_time_string(end_time)
logging_thread = threading.Thread(
target=parea_logger.record_log,
kwargs={"data": trace_data.get()[trace_id]},
)
logging_thread.start()
default_logger(trace_id)
trace_context.get().pop()

def decorator(func):
@@ -135,3 +131,11 @@ def wrapper(*args, **kwargs):
return decorator(func)

return decorator


def default_logger(trace_id: str):
logging_thread = threading.Thread(
target=parea_logger.record_log,
kwargs={"data": trace_data.get()[trace_id]},
)
logging_thread.start()
17 changes: 17 additions & 0 deletions parea/wrapper/__init__.py
@@ -0,0 +1,17 @@
from parea.utils.trace_utils import default_logger
from parea.wrapper.openai import OpenAIWrapper

_initialized_parea_wrapper = False


def init():
global _initialized_parea_wrapper
if _initialized_parea_wrapper:
return

OpenAIWrapper.init(default_logger)

_initialized_parea_wrapper = True


init()
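
The OpenAIWrapper class itself lives in parea/wrapper/openai.py, one of the changed files whose diff did not load in this view, so the following is only an assumption about how such a wrapper typically works: it monkey-patches openai.ChatCompletion.create, times each call, and hands a record to the logger callback. In the real SDK that callback is default_logger(trace_id) and the wrapper would first populate trace_data; the sketch below simplifies this by passing a plain dict to a generic callback.

import time
from typing import Any, Callable, Dict

import openai


class OpenAIWrapperSketch:
    """Illustrative only; the real OpenAIWrapper lives in parea/wrapper/openai.py."""

    _original_create: Callable = None

    @classmethod
    def init(cls, log_fn: Callable[[Dict[str, Any]], None]) -> None:
        # Patch openai.ChatCompletion.create so every chat completion call is intercepted.
        cls._original_create = openai.ChatCompletion.create

        def traced_create(*args: Any, **kwargs: Any) -> Any:
            start = time.time()
            response = cls._original_create(*args, **kwargs)
            # Hand a minimal record to the logger; the real wrapper builds a full trace
            # entry (inputs, output, model, latency) and logs it on a background thread.
            log_fn({"inputs": kwargs, "output": response, "latency": time.time() - start})
            return response

        openai.ChatCompletion.create = traced_create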
(Diffs for the remaining changed files did not load in this view.)