Merge pull request #53 from parea-ai/format-cookbook
update cookbooks
jalexanderII committed Aug 29, 2023
2 parents a639b23 + 76aacaf commit d96e674
Showing 12 changed files with 70 additions and 88 deletions.
5 changes: 5 additions & 0 deletions parea/client.py
@@ -1,3 +1,6 @@
import asyncio
import time

from attrs import asdict, define, field

from parea.api_client import HTTPClient
@@ -54,13 +57,15 @@ async def aget_prompt(self, data: UseDeployedPrompt) -> UseDeployedPromptResponse
        return UseDeployedPromptResponse(**r.json())

    def record_feedback(self, data: FeedbackRequest) -> None:
        time.sleep(2)  # give logs time to update
        self._client.request(
            "POST",
            RECORD_FEEDBACK_ENDPOINT,
            data=asdict(data),
        )

    async def arecord_feedback(self, data: FeedbackRequest) -> None:
        await asyncio.sleep(2)  # give logs time to update
        await self._client.request_async(
            "POST",
            RECORD_FEEDBACK_ENDPOINT,
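For orientation, a minimal calling-side sketch (not part of the diff): record_feedback and arecord_feedback now pause for about two seconds internally so the log exists before feedback is attached to it, which is why the cookbook changes below can drop their own sleeps. The placeholder id below is hypothetical; in the cookbooks it comes from get_current_trace_id() or from CompletionResponse.inference_id.

import os

from parea import Parea
from parea.schemas.models import FeedbackRequest

p = Parea(api_key=os.getenv("PAREA_API_KEY"))

# Hypothetical placeholder; use a real trace/inference id in practice.
some_trace_id = "00000000-0000-0000-0000-000000000000"

# record_feedback() now sleeps ~2 seconds internally before posting.
p.record_feedback(
    FeedbackRequest(
        trace_id=some_trace_id,
        score=0.5,  # 0.0 (bad) to 1.0 (good)
        target="Reference answer to compare the completion against.",
    )
)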
Binary file modified parea/cookbook/img/dashboard.png
Binary file modified parea/cookbook/img/dashboard_detailed_view.png
Binary file modified parea/cookbook/img/feedback.png
Binary file modified parea/cookbook/img/meta_data.png
2 changes: 1 addition & 1 deletion parea/cookbook/tracing_with_Parea_sdk.ipynb
@@ -369,7 +369,7 @@
"\n",
"p.record_feedback(\n",
" FeedbackRequest(\n",
" trace_id=result.trace_id,\n",
" trace_id=result.inference_id,\n",
" score=0.5,\n",
" target=\"Moonshine is nice. Full stop.\",\n",
" )\n",
4 changes: 1 addition & 3 deletions parea/cookbook/tracing_with_agent.py
@@ -1,6 +1,5 @@
import os
import random
import time

from dotenv import load_dotenv

@@ -105,6 +104,5 @@ def run_agent(main_objective: str, initial_task: str = "") -> tuple[list[dict[st

if __name__ == "__main__":
    result, trace_id = run_agent("Become a machine learning expert.", "Learn about tensors.")
    time.sleep(1)
    p.record_feedback(FeedbackRequest(trace_id=trace_id, score=0.642))
    print(result)
    p.record_feedback(FeedbackRequest(trace_id=trace_id, score=0.642))
13 changes: 5 additions & 8 deletions parea/cookbook/tracing_with_deployed_prompt.py
@@ -1,5 +1,4 @@
import os
import time
from datetime import datetime

from dotenv import load_dotenv
@@ -105,26 +104,24 @@ def deployed_argument_chain3(query: str, additional_description: str = "") -> CompletionResponse
"Whether wine is good for you.",
additional_description="Provide a concise, few sentence argument on why wine is good for you.",
)
time.sleep(3)
print(result2)
p.record_feedback(
FeedbackRequest(
trace_id=trace_id2,
score=0.0, # 0.0 (bad) to 1.0 (good)
target="Moonshine is wonderful.",
)
)
print(result2)

result3 = deployed_argument_chain3(
"Whether moonshine is good for you.",
additional_description="Provide a concise, few sentence argument on why moonshine is good for you.",
"Whether coffee is good for you.",
additional_description="Provide a concise, few sentence argument on why coffee is good for you.",
)
time.sleep(3)
print(result3.content)
p.record_feedback(
FeedbackRequest(
trace_id=result3.inference_id,
score=0.7, # 0.0 (bad) to 1.0 (good)
target="Moonshine is wonderful. End of story.",
target="Coffee is wonderful. End of story.",
)
)
print(result3.error or result3.content)
124 changes: 55 additions & 69 deletions parea/cookbook/tracing_with_open_ai_endpoint_directly.py
@@ -5,97 +5,83 @@
from dotenv import load_dotenv

from parea import Parea
from parea.schemas.models import FeedbackRequest
from parea.utils.trace_utils import get_current_trace_id, trace

load_dotenv()

openai.api_key = os.getenv("OPENAI_API_KEY")

p = Parea(api_key=os.getenv("PAREA_API_KEY"))


def argument_generator(query: str, additional_description: str = "", date=datetime.now()) -> str:
    return (
        openai.ChatCompletion.create(
            model="gpt-3.5-turbo-0613",
            messages=[
                {
                    "role": "system",
                    "content": f"""You are a debater making an argument on a topic.
{additional_description}.
The current time is {date}""",
                },
                {"role": "user", "content": f"""The discussion topic is {query}"""},
            ],
            temperature=0.0,
        )
        .choices[0]
        .message["content"]
p = Parea(api_key=os.getenv("DEV_API_KEY"))


def call_openai(data: list[dict], model: str = "gpt-3.5-turbo-0613", temperature: float = 0.0) -> str:
    return openai.ChatCompletion.create(model=model, messages=data, temperature=temperature).choices[0].message["content"]


@trace
def argument_generator(query: str, additional_description: str = "") -> str:
    return call_openai(
        data=[
            {
                "role": "system",
                "content": f"""You are a debater making an argument on a topic. {additional_description}.
The current time is {datetime.now()}""",
            },
            {"role": "user", "content": f"""The discussion topic is {query}"""},
        ]
    )


@trace
def critic(argument: str) -> str:
    return (
        openai.ChatCompletion.create(
            model="gpt-3.5-turbo-0613",
            messages=[
                {
                    "role": "system",
                    "content": f"""You are a critic.
What unresolved questions or criticism do you have after reading the following argument?
Provide a concise summary of your feedback.""",
                },
                {"role": "user", "content": f"""{argument}"""},
            ],
            temperature=0.0,
        )
        .choices[0]
        .message["content"]
    return call_openai(
        data=[
            {
                "role": "system",
                "content": f"""You are a critic.
What unresolved questions or criticism do you have after reading the following argument?
Provide a concise summary of your feedback.""",
            },
            {"role": "user", "content": f"""{argument}"""},
        ]
    )


def refiner(query: str, additional_description: str, current_arg: str, criticism: str, date=datetime.now()) -> str:
    return (
        openai.ChatCompletion.create(
            model="gpt-3.5-turbo-0613",
            messages=[
                {
                    "role": "system",
                    "content": f"""You are a debater making an argument on a topic.
{additional_description}.
The current time is {date}""",
                },
                {"role": "user", "content": f"""The discussion topic is {query}"""},
                {"role": "assistant", "content": f"""{current_arg}"""},
                {"role": "user", "content": f"""{criticism}"""},
                {"role": "system", "content": f"""Please generate a new argument that incorporates the feedback from the user."""},
            ],
            temperature=0.0,
        )
        .choices[0]
        .message["content"]
@trace
def refiner(query: str, additional_description: str, current_arg: str, criticism: str) -> str:
    return call_openai(
        data=[
            {
                "role": "system",
                "content": f"""You are a debater making an argument on a topic. {additional_description}.
The current time is {datetime.now()}""",
            },
            {"role": "user", "content": f"""The discussion topic is {query}"""},
            {"role": "assistant", "content": f"""{current_arg}"""},
            {"role": "user", "content": f"""{criticism}"""},
            {
                "role": "system",
                "content": f"""Please generate a new argument that incorporates the feedback from the user.""",
            },
        ]
    )


def argument_chain(query: str, additional_description: str = "") -> str:
@trace
def argument_chain(query: str, additional_description: str = "") -> tuple[str, str]:
    trace_id = get_current_trace_id()
    argument = argument_generator(query, additional_description)
    criticism = critic(argument)
    return refiner(query, additional_description, argument, criticism)
    return refiner(query, additional_description, argument, criticism), trace_id


if __name__ == "__main__":
    result = argument_chain(
        "Whether caffeine is good for you.",
        additional_description="Provide a concise, few sentence argument on why caffeine is good for you.",
    result, trace_id = argument_chain(
        "Whether sparkling water is good for you.",
        additional_description="Provide a concise, few sentence argument on why sparkling water is good for you.",
    )
    print(result)

    from parea.schemas.models import FeedbackRequest
    from parea.utils.trace_utils import get_current_trace_id

    p = Parea(api_key=os.getenv("PAREA_API_KEY"))

    trace_id = get_current_trace_id()
    print(f"trace_id: {trace_id}")
    p.record_feedback(
        FeedbackRequest(
            trace_id=trace_id,
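The net effect of this refactor: the OpenAI calls are wrapped in @trace-decorated functions, the chain grabs its trace id via get_current_trace_id() and returns it, and the caller attaches feedback to that id. A condensed sketch of the same pattern, assuming the script's environment variables; the answer step below is hypothetical, not from the diff.

import os

from parea import Parea
from parea.schemas.models import FeedbackRequest
from parea.utils.trace_utils import get_current_trace_id, trace

p = Parea(api_key=os.getenv("PAREA_API_KEY"))


@trace
def answer(question: str) -> tuple[str, str]:
    # Grab the trace id from inside the @trace-decorated function,
    # just as argument_chain does above.
    trace_id = get_current_trace_id()
    return f"A short answer to: {question}", trace_id


if __name__ == "__main__":
    result, trace_id = answer("Is sparkling water good for you?")
    print(result)
    p.record_feedback(FeedbackRequest(trace_id=trace_id, score=1.0))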
7 changes: 2 additions & 5 deletions parea/cookbook/tracing_without_deployed_prompt.py
@@ -1,5 +1,4 @@
import os
import time
from datetime import datetime

from dotenv import load_dotenv
@@ -136,26 +135,24 @@ def argument_chain3(query: str, additional_description: str = "") -> CompletionResponse
"Whether wine is good for you.",
additional_description="Provide a concise, few sentence argument on why wine is good for you.",
)
time.sleep(3)
print(result2)
p.record_feedback(
FeedbackRequest(
trace_id=trace_id2,
score=0.0, # 0.0 (bad) to 1.0 (good)
target="Moonshine is wonderful.",
)
)
print(result2)

result3 = argument_chain3(
"Whether moonshine is good for you.",
additional_description="Provide a concise, few sentence argument on why moonshine is good for you.",
)
time.sleep(3)
print(result3.content)
p.record_feedback(
FeedbackRequest(
trace_id=result3.inference_id,
score=0.7, # 0.0 (bad) to 1.0 (good)
target="Moonshine is wonderful. End of story.",
)
)
print(result3.content)
1 change: 0 additions & 1 deletion parea/schemas/models.py
@@ -71,7 +71,6 @@ class CompletionResponse:
    status: str
    start_timestamp: str
    end_timestamp: str
    trace_id: Optional[str] = None
    error: Optional[str] = None


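With trace_id dropped from CompletionResponse, feedback on a completion is keyed off its inference_id instead, as the deployed-prompt cookbook above now does. A small sketch of that calling pattern; score_completion is a hypothetical helper, and p is assumed to be a Parea client as above.

from parea.schemas.models import CompletionResponse, FeedbackRequest


def score_completion(p, result: CompletionResponse, score: float) -> None:
    # CompletionResponse no longer carries trace_id; inference_id identifies the log.
    p.record_feedback(FeedbackRequest(trace_id=result.inference_id, score=score))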
2 changes: 1 addition & 1 deletion parea/utils/trace_utils.py
@@ -34,7 +34,7 @@ def merge(old, new):
    return new


def get_current_trace_id():
def get_current_trace_id() -> str:
    return trace_context.get()[-1]


