diff --git a/parea/client.py b/parea/client.py index b3739a08..cb530b56 100644 --- a/parea/client.py +++ b/parea/client.py @@ -1,3 +1,6 @@ +import asyncio +import time + from attrs import asdict, define, field from parea.api_client import HTTPClient @@ -54,6 +57,7 @@ async def aget_prompt(self, data: UseDeployedPrompt) -> UseDeployedPromptRespons return UseDeployedPromptResponse(**r.json()) def record_feedback(self, data: FeedbackRequest) -> None: + time.sleep(2) # give logs time to update self._client.request( "POST", RECORD_FEEDBACK_ENDPOINT, @@ -61,6 +65,7 @@ def record_feedback(self, data: FeedbackRequest) -> None: ) async def arecord_feedback(self, data: FeedbackRequest) -> None: + await asyncio.sleep(2) # give logs time to update await self._client.request_async( "POST", RECORD_FEEDBACK_ENDPOINT, diff --git a/parea/cookbook/img/dashboard.png b/parea/cookbook/img/dashboard.png index 3b40bec5..ff615de2 100644 Binary files a/parea/cookbook/img/dashboard.png and b/parea/cookbook/img/dashboard.png differ diff --git a/parea/cookbook/img/dashboard_detailed_view.png b/parea/cookbook/img/dashboard_detailed_view.png index 307b12da..a24e5899 100644 Binary files a/parea/cookbook/img/dashboard_detailed_view.png and b/parea/cookbook/img/dashboard_detailed_view.png differ diff --git a/parea/cookbook/img/feedback.png b/parea/cookbook/img/feedback.png index 06be44cc..51f9714a 100644 Binary files a/parea/cookbook/img/feedback.png and b/parea/cookbook/img/feedback.png differ diff --git a/parea/cookbook/img/meta_data.png b/parea/cookbook/img/meta_data.png index f05cbd2a..c2435955 100644 Binary files a/parea/cookbook/img/meta_data.png and b/parea/cookbook/img/meta_data.png differ diff --git a/parea/cookbook/tracing_with_Parea_sdk.ipynb b/parea/cookbook/tracing_with_Parea_sdk.ipynb index 12167fe5..cb827998 100644 --- a/parea/cookbook/tracing_with_Parea_sdk.ipynb +++ b/parea/cookbook/tracing_with_Parea_sdk.ipynb @@ -369,7 +369,7 @@ "\n", "p.record_feedback(\n", " 
FeedbackRequest(\n", - " trace_id=result.trace_id,\n", + " trace_id=result.inference_id,\n", " score=0.5,\n", " target=\"Moonshine is nice. Full stop.\",\n", " )\n", diff --git a/parea/cookbook/tracing_with_agent.py b/parea/cookbook/tracing_with_agent.py index cb1c2d98..83eda9e8 100644 --- a/parea/cookbook/tracing_with_agent.py +++ b/parea/cookbook/tracing_with_agent.py @@ -1,6 +1,5 @@ import os import random -import time from dotenv import load_dotenv @@ -105,6 +104,5 @@ def run_agent(main_objective: str, initial_task: str = "") -> tuple[list[dict[st if __name__ == "__main__": result, trace_id = run_agent("Become a machine learning expert.", "Learn about tensors.") - time.sleep(1) - p.record_feedback(FeedbackRequest(trace_id=trace_id, score=0.642)) print(result) + p.record_feedback(FeedbackRequest(trace_id=trace_id, score=0.642)) diff --git a/parea/cookbook/tracing_with_deployed_prompt.py b/parea/cookbook/tracing_with_deployed_prompt.py index 3a654754..ffc4e26d 100644 --- a/parea/cookbook/tracing_with_deployed_prompt.py +++ b/parea/cookbook/tracing_with_deployed_prompt.py @@ -1,5 +1,4 @@ import os -import time from datetime import datetime from dotenv import load_dotenv @@ -105,7 +104,7 @@ def deployed_argument_chain3(query: str, additional_description: str = "") -> Co "Whether wine is good for you.", additional_description="Provide a concise, few sentence argument on why wine is good for you.", ) - time.sleep(3) + print(result2) p.record_feedback( FeedbackRequest( trace_id=trace_id2, @@ -113,18 +112,16 @@ def deployed_argument_chain3(query: str, additional_description: str = "") -> Co target="Moonshine is wonderful.", ) ) - print(result2) result3 = deployed_argument_chain3( - "Whether moonshine is good for you.", - additional_description="Provide a concise, few sentence argument on why moonshine is good for you.", + "Whether coffee is good for you.", + additional_description="Provide a concise, few sentence argument on why coffee is good for you.", ) - 
time.sleep(3) +    print(result3.content)     p.record_feedback(         FeedbackRequest(             trace_id=result3.inference_id,             score=0.7,  # 0.0 (bad) to 1.0 (good) -            target="Moonshine is wonderful. End of story.", +            target="Coffee is wonderful. End of story.",         )     ) -    print(result3.error or result3.content) diff --git a/parea/cookbook/tracing_with_open_ai_endpoint_directly.py b/parea/cookbook/tracing_with_open_ai_endpoint_directly.py index 83811d20..bc8399f9 100644 --- a/parea/cookbook/tracing_with_open_ai_endpoint_directly.py +++ b/parea/cookbook/tracing_with_open_ai_endpoint_directly.py @@ -5,97 +5,83 @@ from dotenv import load_dotenv  from parea import Parea +from parea.schemas.models import FeedbackRequest +from parea.utils.trace_utils import get_current_trace_id, trace  load_dotenv()  openai.api_key = os.getenv("OPENAI_API_KEY")  -p = Parea(api_key=os.getenv("PAREA_API_KEY")) - - -def argument_generator(query: str, additional_description: str = "", date=datetime.now()) -> str: -    return ( -        openai.ChatCompletion.create( -            model="gpt-3.5-turbo-0613", -            messages=[ -                { -                    "role": "system", -                    "content": f"""You are a debater making an argument on a topic. -{additional_description}. -The current time is {date}""", -                }, -                {"role": "user", "content": f"""The discussion topic is {query}"""}, -            ], -            temperature=0.0, -        ) -        .choices[0] -        .message["content"] +p = Parea(api_key=os.getenv("PAREA_API_KEY")) + + +def call_openai(data: list[dict], model: str = "gpt-3.5-turbo-0613", temperature: float = 0.0) -> str: +    return openai.ChatCompletion.create(model=model, messages=data, temperature=temperature).choices[0].message["content"] + + +@trace +def argument_generator(query: str, additional_description: str = "") -> str: +    return call_openai( +        data=[ +            { +                "role": "system", +                "content": f"""You are a debater making an argument on a topic. {additional_description}. 
+ The current time is {datetime.now()}""", + }, + {"role": "user", "content": f"""The discussion topic is {query}"""}, + ] ) +@trace def critic(argument: str) -> str: - return ( - openai.ChatCompletion.create( - model="gpt-3.5-turbo-0613", - messages=[ - { - "role": "system", - "content": f"""You are a critic. -What unresolved questions or criticism do you have after reading the following argument? -Provide a concise summary of your feedback.""", - }, - {"role": "user", "content": f"""{argument}"""}, - ], - temperature=0.0, - ) - .choices[0] - .message["content"] + return call_openai( + data=[ + { + "role": "system", + "content": f"""You are a critic. + What unresolved questions or criticism do you have after reading the following argument? + Provide a concise summary of your feedback.""", + }, + {"role": "user", "content": f"""{argument}"""}, + ] ) -def refiner(query: str, additional_description: str, current_arg: str, criticism: str, date=datetime.now()) -> str: - return ( - openai.ChatCompletion.create( - model="gpt-3.5-turbo-0613", - messages=[ - { - "role": "system", - "content": f"""You are a debater making an argument on a topic. -{additional_description}. -The current time is {date}""", - }, - {"role": "user", "content": f"""The discussion topic is {query}"""}, - {"role": "assistant", "content": f"""{current_arg}"""}, - {"role": "user", "content": f"""{criticism}"""}, - {"role": "system", "content": f"""Please generate a new argument that incorporates the feedback from the user."""}, - ], - temperature=0.0, - ) - .choices[0] - .message["content"] +@trace +def refiner(query: str, additional_description: str, current_arg: str, criticism: str) -> str: + return call_openai( + data=[ + { + "role": "system", + "content": f"""You are a debater making an argument on a topic. {additional_description}. 
+ The current time is {datetime.now()}""", + }, + {"role": "user", "content": f"""The discussion topic is {query}"""}, + {"role": "assistant", "content": f"""{current_arg}"""}, + {"role": "user", "content": f"""{criticism}"""}, + { + "role": "system", + "content": f"""Please generate a new argument that incorporates the feedback from the user.""", + }, + ] ) -def argument_chain(query: str, additional_description: str = "") -> str: +@trace +def argument_chain(query: str, additional_description: str = "") -> tuple[str, str]: + trace_id = get_current_trace_id() argument = argument_generator(query, additional_description) criticism = critic(argument) - return refiner(query, additional_description, argument, criticism) + return refiner(query, additional_description, argument, criticism), trace_id if __name__ == "__main__": - result = argument_chain( - "Whether caffeine is good for you.", - additional_description="Provide a concise, few sentence argument on why caffeine is good for you.", + result, trace_id = argument_chain( + "Whether sparkling water is good for you.", + additional_description="Provide a concise, few sentence argument on why sparkling water is good for you.", ) print(result) - - from parea.schemas.models import FeedbackRequest - from parea.utils.trace_utils import get_current_trace_id - - p = Parea(api_key=os.getenv("PAREA_API_KEY")) - - trace_id = get_current_trace_id() - print(f"trace_id: {trace_id}") p.record_feedback( FeedbackRequest( trace_id=trace_id, diff --git a/parea/cookbook/tracing_without_deployed_prompt.py b/parea/cookbook/tracing_without_deployed_prompt.py index 4a073d0f..cce1600c 100644 --- a/parea/cookbook/tracing_without_deployed_prompt.py +++ b/parea/cookbook/tracing_without_deployed_prompt.py @@ -1,5 +1,4 @@ import os -import time from datetime import datetime from dotenv import load_dotenv @@ -136,7 +135,7 @@ def argument_chain3(query: str, additional_description: str = "") -> CompletionR "Whether wine is good for you.", 
additional_description="Provide a concise, few sentence argument on why wine is good for you.", ) - time.sleep(3) + print(result2) p.record_feedback( FeedbackRequest( trace_id=trace_id2, @@ -144,13 +143,12 @@ def argument_chain3(query: str, additional_description: str = "") -> CompletionR target="Moonshine is wonderful.", ) ) - print(result2) result3 = argument_chain3( "Whether moonshine is good for you.", additional_description="Provide a concise, few sentence argument on why moonshine is good for you.", ) - time.sleep(3) + print(result3.content) p.record_feedback( FeedbackRequest( trace_id=result3.inference_id, @@ -158,4 +156,3 @@ def argument_chain3(query: str, additional_description: str = "") -> CompletionR target="Moonshine is wonderful. End of story.", ) ) - print(result3.content) diff --git a/parea/schemas/models.py b/parea/schemas/models.py index ded3f502..2dc857bf 100644 --- a/parea/schemas/models.py +++ b/parea/schemas/models.py @@ -71,7 +71,6 @@ class CompletionResponse: status: str start_timestamp: str end_timestamp: str - trace_id: Optional[str] = None error: Optional[str] = None diff --git a/parea/utils/trace_utils.py b/parea/utils/trace_utils.py index 14135bae..2cac80d9 100644 --- a/parea/utils/trace_utils.py +++ b/parea/utils/trace_utils.py @@ -34,7 +34,7 @@ def merge(old, new): return new -def get_current_trace_id(): +def get_current_trace_id() -> str: return trace_context.get()[-1]