Commit

formatting
jalexanderII committed Aug 28, 2023
1 parent b01ca09 commit b0e9326
Showing 3 changed files with 69 additions and 70 deletions.
2 changes: 1 addition & 1 deletion parea/api_client.py
@@ -5,7 +5,7 @@

class HTTPClient:
    _instance = None
-    base_url = "https://optimus-prompt-backend.vercel.app/api/parea/v1"
+    base_url = "http://localhost:8000/api/parea/v1" # "https://optimus-prompt-backend.vercel.app/api/parea/v1"
    api_key = None

    def __new__(cls, *args, **kwargs):
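Hard-coding the localhost URL means the production endpoint has to be restored by hand before every release. A minimal sketch of an alternative is shown below, assuming a hypothetical PAREA_BASE_URL environment variable (it is not defined by this commit or the SDK):

import os


class HTTPClient:
    _instance = None
    # Defaults to the production endpoint; export the (hypothetical) PAREA_BASE_URL
    # variable, e.g. http://localhost:8000/api/parea/v1, to target a local backend.
    base_url = os.getenv("PAREA_BASE_URL", "https://optimus-prompt-backend.vercel.app/api/parea/v1")
    api_key = None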
126 changes: 63 additions & 63 deletions parea/cookbook/tracing_with_Parea_sdk.ipynb
@@ -113,50 +113,50 @@

# We pass the deployment_id and the required inputs to the completion function along with the trace_id
@trace
def argument_generator(query: str, additional_description: str = "") -> str:
    return p.completion(
        Completion(
            deployment_id="p-Ar-Oi14-nBxHUiradyql9",
            llm_inputs={
                "additional_description": additional_description,
                "date": f"{datetime.now()}",
                "query": query,
            },
        )
    ).content


@trace
def critic(argument: str) -> str:
    return p.completion(
        Completion(
            deployment_id="p-W2yPy93tAczYrxkipjli6",
            llm_inputs={"argument": argument},
        )
    ).content


@trace
def refiner(query: str, additional_description: str, current_arg: str, criticism: str) -> str:
    return p.completion(
        Completion(
            deployment_id="p-8Er1Xo0GDGF2xtpmMOpbn",
            llm_inputs={
                "additional_description": additional_description,
                "date": f"{datetime.now()}",
                "query": query,
                "current_arg": current_arg,
                "criticism": criticism,
            },
        )
    ).content


# This is the parent function which orchestrates the chaining. We'll define our trace_id and trace_name here
@trace
def argument_chain(query: str, additional_description: str = "") -> str:
    argument = argument_generator(query, additional_description)
    criticism = critic(argument)
    return refiner(query, additional_description, argument, criticism)
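For intuition about what the @trace decorator is doing for these chained functions, here is a toy sketch of a tracing decorator: it opens a trace id at the outermost call, shares it with nested calls, and logs each function's latency. This is an illustrative stand-in only, not Parea's actual implementation.

import functools
import time
import uuid

_current_trace_id = None  # toy stand-in for the SDK's trace context


def toy_trace(func):
    """Illustrative decorator: share one trace id across nested calls and log latency."""

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        global _current_trace_id
        opened_trace = _current_trace_id is None
        if opened_trace:  # the outermost decorated call opens the trace
            _current_trace_id = str(uuid.uuid4())
        start = time.time()
        try:
            return func(*args, **kwargs)
        finally:
            print(f"[trace {_current_trace_id}] {func.__name__} finished in {time.time() - start:.3f}s")
            if opened_trace:  # ...and closes it when the outermost call returns
                _current_trace_id = None

    return wrapper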
@@ -184,8 +184,8 @@

result = argument_chain(
    "Whether moonshine is good for you.",
    additional_description="Provide a concise, few sentence argument on why moonshine is good for you.",
)
print(result)
@@ -222,10 +222,10 @@

@trace
def argument_chain2(query: str, additional_description: str = "") -> tuple[str, str]:
    trace_id = get_current_trace_id()
    argument = argument_generator(query, additional_description)
    criticism = critic(argument)
    return refiner(query, additional_description, argument, criticism), trace_id
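Because argument_chain2 also returns the trace id captured by get_current_trace_id(), a score for a run can be attached to that exact trace after the fact (the next cell shows the call that produces trace_id). A minimal sketch; the FeedbackRequest import path and the 0-1 score range are assumptions based on the later feedback cells, not something this hunk shows:

from parea.schemas.models import FeedbackRequest  # import path assumed for illustration

p.record_feedback(
    FeedbackRequest(
        trace_id=trace_id,  # the id captured inside argument_chain2 via get_current_trace_id()
        score=0.7,  # assumed to be a 0-1 quality score, matching the later feedback cells
    )
)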
result, trace_id = argument_chain2(
    "Whether moonshine is good for you.",
    additional_description="Provide a concise, few sentence argument on why moonshine is good for you.",
)
print(result)
@@ -308,18 +308,18 @@

# let's return the full CompletionResponse to see what other information is returned
@trace
def refiner2(query: str, additional_description: str, current_arg: str, criticism: str) -> CompletionResponse:
    return p.completion(
        Completion(
            deployment_id="p-8Er1Xo0GDGF2xtpmMOpbn",
            llm_inputs={
                "additional_description": additional_description,
                "date": f"{datetime.now()}",
                "query": query,
                "current_arg": current_arg,
                "criticism": criticism,
            },
        )
    )
@@ -348,13 +348,13 @@

# you can also add metadata and tags via the decorator
@trace(
    tags=["cookbook-example-deployed", "feedback_tracked-deployed"],
    metadata={"source": "python-sdk", "deployed": True},
)
def argument_chain3(query: str, additional_description: str = "") -> CompletionResponse:
    argument = argument_generator(query, additional_description)
    criticism = critic(argument)
    return refiner2(query, additional_description, argument, criticism)
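argument_chain3 passes refiner2's full CompletionResponse back to the caller, so the generated text and the identifier needed for feedback stay together. A small sketch using only the two fields this notebook itself touches (.content in the earlier cells, .trace_id in the next cell); the argument values are placeholders for illustration:

response = argument_chain3(
    "Whether moonshine is good for you.",
    additional_description="a placeholder description",  # illustrative value only
)
print(response.content)  # the generated text, as earlier cells read .content
print(response.trace_id)  # the id that p.record_feedback expects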
import json, attrs

result = argument_chain3(
    "Whether moonshine is good for you.",
    additional_description="Provide a concise, few sentence argument on why sunshine is good for you.",
)

p.record_feedback(
    FeedbackRequest(
        trace_id=result.trace_id,
        score=0.5,
        target="Moonshine is nice. Full stop.",
    )
)
print(json.dumps(attrs.asdict(result), indent=4))
11 changes: 5 additions & 6 deletions parea/cookbook/tracing_with_agent.py
@@ -12,8 +12,9 @@

p = Parea(api_key=os.getenv("DEV_API_KEY"))


-LIMIT = 5
+# Parea SDK makes it easy to use different LLMs with the same apis structure and standardized request/response schemas.
+LLM_OPTIONS = [("gpt-3.5-turbo", "openai"), ("gpt-4", "openai"), ("claude-instant-1", "anthropic"), ("claude-2", "anthropic")]
+LIMIT = 1


def dump_task(task):
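The comment introducing LLM_OPTIONS claims that different providers share the same request/response schema; each entry is just a (model, provider) pair. A hypothetical illustration of picking one is below (not part of this commit; the completion call that ultimately consumes the pair is elided from this diff):

import random

model, provider = random.choice(LLM_OPTIONS)  # e.g. ("claude-2", "anthropic")
print(f"This iteration will route the prompt to {model!r} via {provider!r}.")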
@@ -58,9 +58,7 @@ def expound_task(main_objective: str, current_task: str) -> list[dict[str, str]]

@trace
def generate_tasks(main_objective: str, expounded_initial_task: list[dict[str, str]]) -> list[str]:
-    llm_options = [("gpt-3.5-turbo", "openai"), ("gpt-4", "openai"), ("claude-instant-1", "anthropic"), ("claude-2", "anthropic")]
-    select_llm_option = random.choice(llm_options)
-
+    select_llm_option = random.choice(LLM_OPTIONS)
    task_expansion = dump_task(expounded_initial_task)
    prompt = [
        Message(
@@ -106,6 +105,6 @@ def run_agent(main_objective: str, initial_task: str = "") -> tuple[list[dict[st

if __name__ == "__main__":
    result, trace_id = run_agent("Become a machine learning expert.", "Learn about tensors.")
-    time.sleep(3)
+    time.sleep(1)
    p.record_feedback(FeedbackRequest(trace_id=trace_id, score=0.642))
    print(result)
