diff --git a/parea/cookbook/tracing_with_Parea_sdk.ipynb b/parea/cookbook/tracing_with_Parea_sdk.ipynb
index 35a3c079..50b7513a 100644
--- a/parea/cookbook/tracing_with_Parea_sdk.ipynb
+++ b/parea/cookbook/tracing_with_Parea_sdk.ipynb
@@ -10,7 +10,7 @@
    "source": [
     "# LLM Tracing\n",
     "\n",
-    "With the Parea SDK, you can gain visibility into **any LLM application**. Together with the web application, Parea speeds up your debugging, evaluating, and monitoring workflows. \n",
+    "With the Parea SDK, you can gain visibility into **any LLM application**. Together with the web application, Parea speeds up your debugging, evaluating, and monitoring workflows.\n",
     "Parea is also framework and provider-agnostic. Parea traces your prompts and chains, whether deployed from Parea or within your codebase.\n",
     "\n",
     "We will create a simple chat app and instrument logging with Parea. We will also add tags and other metadata to enrich our traces. The chat app uses three 'chained' components to generate a text argument on a provided subject:\n",
@@ -111,11 +111,10 @@
     "p = Parea(api_key=os.getenv(\"PAREA_API_KEY\"))\n",
     "\n",
     "\n",
-    "@trace\n",
     "def argument_generator(query: str, additional_description: str = \"\") -> str:\n",
     "    return p.completion(\n",
     "        Completion(\n",
-    "            deployment_id=\"p-Ar-Oi14-nBxHUiradyql9\",\n",
+    "            deployment_id=\"p-RG8d9rfJc_0cctwfpb_n6\",\n",
     "            llm_inputs={\n",
     "                \"additional_description\": additional_description,\n",
     "                \"date\": f\"{datetime.now()}\",\n",
@@ -125,16 +124,19 @@
     "    ).content\n",
     "\n",
     "\n",
-    "@trace\n",
     "def critic(argument: str) -> str:\n",
-    "    return p.completion(Completion(deployment_id=\"p-W2yPy93tAczYrxkipjli6\", llm_inputs={\"argument\": argument})).content\n",
+    "    return p.completion(\n",
+    "        Completion(\n",
+    "            deployment_id=\"p-fXgZytT3dJjXD_71TDR4s\",\n",
+    "            llm_inputs={\"argument\": argument},\n",
+    "        )\n",
+    "    ).content\n",
     "\n",
     "\n",
-    "@trace\n",
     "def refiner(query: str, additional_description: str, current_arg: str, criticism: str) -> str:\n",
     "    return p.completion(\n",
     "        Completion(\n",
-    "            deployment_id=\"p-8Er1Xo0GDGF2xtpmMOpbn\",\n",
+    "            deployment_id=\"p--G2s9okMTvBEh3d8YqLY2\",\n",
     "            llm_inputs={\n",
     "                \"additional_description\": additional_description,\n",
     "                \"date\": f\"{datetime.now()}\",\n",
@@ -283,7 +285,6 @@
     "\n",
     "\n",
     "# let's return the full CompletionResponse to see what other information is returned\n",
-    "@trace\n",
     "def refiner2(query: str, additional_description: str, current_arg: str, criticism: str) -> CompletionResponse:\n",
     "    return p.completion(\n",
     "        Completion(\n",
diff --git a/parea/cookbook/tracing_with_deployed_prompt.py b/parea/cookbook/tracing_with_deployed_prompt.py
index c6bcd996..058bd04f 100644
--- a/parea/cookbook/tracing_with_deployed_prompt.py
+++ b/parea/cookbook/tracing_with_deployed_prompt.py
@@ -1,3 +1,5 @@
+from typing import Tuple
+
 import os
 from datetime import datetime
 
@@ -15,7 +17,7 @@
 def deployed_argument_generator(query: str, additional_description: str = "") -> str:
     return p.completion(
         Completion(
-            deployment_id="p-Ar-Oi14-nBxHUiradyql9",
+            deployment_id="p-RG8d9rfJc_0cctwfpb_n6",
             llm_inputs={
                 "additional_description": additional_description,
                 "date": f"{datetime.now()}",
@@ -28,7 +30,7 @@ def deployed_argument_generator(query: str, additional_description: str = "") ->
 def deployed_critic(argument: str) -> str:
     return p.completion(
         Completion(
-            deployment_id="p-W2yPy93tAczYrxkipjli6",
+            deployment_id="p-fXgZytT3dJjXD_71TDR4s",
             llm_inputs={"argument": argument},
         )
     ).content
@@ -37,7 +39,7 @@ def deployed_critic(argument: str) -> str:
 def deployed_refiner(query: str, additional_description: str, current_arg: str, criticism: str) -> str:
     return p.completion(
         Completion(
-            deployment_id="p-8Er1Xo0GDGF2xtpmMOpbn",
+            deployment_id="p--G2s9okMTvBEh3d8YqLY2",
             llm_inputs={
                 "additional_description": additional_description,
                 "date": f"{datetime.now()}",
@@ -52,7 +54,7 @@ def deployed_refiner(query: str, additional_description: str, current_arg: str,
 def deployed_refiner2(query: str, additional_description: str, current_arg: str, criticism: str) -> CompletionResponse:
     return p.completion(
         Completion(
-            deployment_id="p-8Er1Xo0GDGF2xtpmMOpbn",
+            deployment_id="p--G2s9okMTvBEh3d8YqLY2",
             llm_inputs={
                 "additional_description": additional_description,
                 "date": f"{datetime.now()}",
@@ -71,6 +73,14 @@ def deployed_argument_chain(query: str, additional_description: str = "") -> str
     return deployed_refiner(query, additional_description, argument, criticism)
 
 
+@trace
+def deployed_argument_chain2(query: str, additional_description: str = "") -> Tuple[str, str]:
+    trace_id = get_current_trace_id()
+    argument = deployed_argument_generator(query, additional_description)
+    criticism = deployed_critic(argument)
+    return deployed_refiner(query, additional_description, argument, criticism), trace_id
+
+
 @trace(
     tags=["cookbook-example-deployed", "feedback_tracked-deployed"],
     metadata={"source": "python-sdk", "deployed": True},
@@ -88,11 +98,10 @@ def deployed_argument_chain_tags_metadata(query: str, additional_description: st
     )
     print(result1)
 
-    result2 = deployed_argument_chain(
+    result2, trace_id2 = deployed_argument_chain2(
         "Whether wine is good for you.",
         additional_description="Provide a concise, few sentence argument on why wine is good for you.",
     )
-    trace_id2 = get_current_trace_id()
     print(result2)
     p.record_feedback(
         FeedbackRequest(
diff --git a/parea/cookbook/tracing_with_open_ai_endpoint_directly.py b/parea/cookbook/tracing_with_open_ai_endpoint_directly.py
index 77c8ed8c..6d9683ae 100644
--- a/parea/cookbook/tracing_with_open_ai_endpoint_directly.py
+++ b/parea/cookbook/tracing_with_open_ai_endpoint_directly.py
@@ -17,6 +17,7 @@
 
 @trace
 def argument_chain(query: str, additional_description: str = "") -> str:
+    trace_id = get_current_trace_id()
     argument = (
         openai.ChatCompletion.create(
             model="gpt-3.5-turbo-0613",
@@ -75,15 +76,14 @@ def argument_chain(query: str, additional_description: str = "") -> str:
         .message["content"]
     )
 
-    return refined_argument
+    return refined_argument, trace_id
 
 
 if __name__ == "__main__":
-    result = argument_chain(
+    result, trace_id = argument_chain(
         "Whether sparkling water is good for you.",
         additional_description="Provide a concise, few sentence argument on why sparkling water is good for you.",
     )
-    trace_id = get_current_trace_id()
     print(result)
     p.record_feedback(
         FeedbackRequest(
diff --git a/pyproject.toml b/pyproject.toml
index d871f467..56e7f086 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,7 +6,7 @@ build-backend = "poetry.core.masonry.api"
 [tool.poetry]
 name = "parea-ai"
 packages = [{ include = "parea" }]
-version = "0.2.5"
+version = "0.2.6"
 description = "Parea python sdk"
 readme = "README.md"
 authors = ["joel-parea-ai "]
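
Note on the pattern this patch moves both cookbook scripts to: the trace id is captured inside the @trace-decorated chain (while the trace context is still active) and returned to the caller, instead of calling get_current_trace_id() after the chain has returned, so user feedback can be attached to the exact trace that produced the answer. Below is a minimal sketch of that flow; the function name argument_chain_with_trace_id is illustrative, the parea import paths are assumed from the cookbook files (their import sections are not visible in this patch), and the FeedbackRequest score field is likewise an assumption since the call is cut off above.

import os
from datetime import datetime
from typing import Tuple

# Assumed import paths; the patch does not show the import sections of these scripts.
from parea import Parea
from parea.schemas.models import Completion, FeedbackRequest
from parea.utils.trace_utils import get_current_trace_id, trace

p = Parea(api_key=os.getenv("PAREA_API_KEY"))


@trace
def argument_chain_with_trace_id(query: str, additional_description: str = "") -> Tuple[str, str]:
    # Grab the trace id while the @trace context is active and hand it back to
    # the caller, rather than calling get_current_trace_id() after the fact.
    trace_id = get_current_trace_id()
    argument = p.completion(
        Completion(
            deployment_id="p-RG8d9rfJc_0cctwfpb_n6",  # deployment id taken from the patch above
            llm_inputs={
                "additional_description": additional_description,
                "date": f"{datetime.now()}",
                "query": query,
            },
        )
    ).content
    return argument, trace_id


if __name__ == "__main__":
    result, trace_id = argument_chain_with_trace_id(
        "Whether sparkling water is good for you.",
        additional_description="Provide a concise, few sentence argument on why sparkling water is good for you.",
    )
    print(result)
    # Attach feedback to the exact trace that produced this answer;
    # the score field is an assumption, not shown in the patch.
    p.record_feedback(FeedbackRequest(trace_id=trace_id, score=0.7))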