
Commit

Merge pull request #882 from parea-ai/test-depth
Add depth and execution order.
jalexanderII committed May 21, 2024
2 parents be9c467 + 12e4cf8 commit b450d72
Showing 10 changed files with 104 additions and 57 deletions.
89 changes: 47 additions & 42 deletions parea/cookbook/tracing_tool_calling.py
@@ -13,48 +13,53 @@
 p.wrap_openai_client(client)


-tools = [
-    {
-        "type": "function",
-        "function": {
-            "name": "get_current_weather",
-            "description": "Get the current weather in a given location",
-            "parameters": {
-                "type": "object",
-                "properties": {
-                    "location": {
-                        "type": "string",
-                        "description": "The city and state, e.g. San Francisco, CA",
-                    },
-                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
-                },
-                "required": ["location"],
-            },
-        },
-    }
-]
-messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]
-completion = client.chat.completions.create(
-    model="gpt-3.5-turbo",
-    messages=messages,
-    tools=tools,
-    tool_choice="auto",
-)
-messages.append({k: v for k, v in completion.choices[0].message.model_dump().items() if v is not None})
-# messages.append(completion.choices[0].message)
-messages.append({"role": "tool", "content": "5 Celcius", "tool_call_id": completion.choices[0].message.tool_calls[0].id})
-messages.append(
-    {
-        "role": "user",
-        "content": "What's the weather like in Boston today?",
-    }
-)
-
-final_completion = client.chat.completions.create(
-    model="gpt-3.5-turbo",
-    messages=messages,
-    tools=tools,
-    tool_choice="auto",
-)
-
-print(final_completion)
+def main():
+    tools = [
+        {
+            "type": "function",
+            "function": {
+                "name": "get_current_weather",
+                "description": "Get the current weather in a given location",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "location": {
+                            "type": "string",
+                            "description": "The city and state, e.g. San Francisco, CA",
+                        },
+                        "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+                    },
+                    "required": ["location"],
+                },
+            },
+        }
+    ]
+    messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]
+    completion = client.chat.completions.create(
+        model="gpt-4o",
+        messages=messages,
+        tools=tools,
+        tool_choice="auto",
+    )
+    messages.append({k: v for k, v in completion.choices[0].message.model_dump().items() if v is not None})
+    # messages.append(completion.choices[0].message)
+    messages.append({"role": "tool", "content": "5 Celcius", "tool_call_id": completion.choices[0].message.tool_calls[0].id})
+    messages.append(
+        {
+            "role": "user",
+            "content": "What's the weather like in Boston today?",
+        }
+    )
+
+    final_completion = client.chat.completions.create(
+        model="gpt-4o",
+        messages=messages,
+        tools=tools,
+        tool_choice="auto",
+    )
+
+    print(final_completion)
+
+
+if __name__ == "__main__":
+    main()
2 changes: 1 addition & 1 deletion parea/cookbook/tracing_with_images_open_ai.py
@@ -30,7 +30,7 @@ def image_maker(query: str) -> str:
 @trace
 def ask_vision(image_url: str) -> Optional[str]:
     response = client.chat.completions.create(
-        model="gpt-4-turbo",
+        model="gpt-4o",
         messages=[
             {
                 "role": "user",
6 changes: 3 additions & 3 deletions parea/cookbook/tracing_with_open_ai_endpoint_directly.py
@@ -17,7 +17,7 @@
 p.wrap_openai_client(client)


-def call_llm(data: List[dict], model: str = "gpt-4-turbo", temperature: float = 0.0) -> str:
+def call_llm(data: List[dict], model: str = "gpt-4o", temperature: float = 0.0) -> str:
     return client.chat.completions.create(model=model, temperature=temperature, messages=data).choices[0].message.content


@@ -64,7 +64,7 @@ def refiner(query: str, additional_description: str, argument: str, criticism: s
             {"role": "user", "content": criticism},
             {
                 "role": "system",
-                "content": f"Please generate a new argument that incorporates the feedback from the user.",
+                "content": "Please generate a new argument that incorporates the feedback from the user.",
             },
         ],
     )
@@ -83,7 +83,7 @@ def argument_chain(query: str, additional_description: str = "") -> Tuple[str, s
 @trace(session_id="cus_1234", end_user_identifier="user_1234")
 def json_call() -> str:
     completion = client.chat.completions.create(
-        model="gpt-4-turbo-2024-04-09",
+        model="gpt-4o",
         messages=[{"role": "system", "content": "You are a helpful assistant talking in JSON."}, {"role": "user", "content": "What are you?"}],
         response_format={"type": "json_object"},
     )
3 changes: 3 additions & 0 deletions parea/schemas/models.py
@@ -141,6 +141,9 @@ class TraceLog(EvaluatedLog):
     comments: Optional[List[TraceLogCommentSchema]] = None
     annotations: Optional[Dict[int, Dict[str, TraceLogAnnotationSchema]]] = None

+    depth: int = 0
+    execution_order: int = 0
+

 @define
 class TraceLogTree(TraceLog):
17 changes: 16 additions & 1 deletion parea/utils/trace_utils.py
@@ -30,6 +30,9 @@
 # Context variable to maintain running evals in thread
 thread_ids_running_evals = contextvars.ContextVar("thread_ids_running_evals", default=[])

+# Add a counter variable to maintain the execution order
+execution_order_counters = contextvars.ContextVar("execution_order_counters", default={})
+

 def log_in_thread(target_func: Callable, data: Dict[str, Any]):
     logging_thread = threading.Thread(target=target_func, kwargs=data)
@@ -179,10 +182,20 @@ def init_trace(func_name, _parea_target_field, args, kwargs, func) -> Tuple[str,
             # if we can't serialize the value, just convert it to a string
             inputs[k] = str(v)

+    depth = len(new_trace_context) - 1
+    root_trace_id = new_trace_context[0]
+
+    # Get the execution order counter for the current root trace
+    counters = execution_order_counters.get()
+    if root_trace_id not in counters:
+        counters[root_trace_id] = 0
+    execution_order = counters[root_trace_id]
+    counters[root_trace_id] += 1
+
     trace_data.get()[trace_id] = TraceLog(
         trace_id=trace_id,
         parent_trace_id=trace_id,
-        root_trace_id=new_trace_context[0],
+        root_trace_id=root_trace_id,
         start_timestamp=start_time.isoformat(),
         trace_name=name or func_name,
         end_user_identifier=end_user_identifier,
@@ -194,6 +207,8 @@ def init_trace(func_name, _parea_target_field, args, kwargs, func) -> Tuple[str,
         experiment_uuid=os.environ.get(PAREA_OS_ENV_EXPERIMENT_UUID, None),
         apply_eval_frac=apply_eval_frac,
         deployment_id=deployment_id,
+        depth=depth,
+        execution_order=execution_order,
     )
     parent_trace_id = new_trace_context[-2] if len(new_trace_context) > 1 else None
     if parent_trace_id:
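
In short: depth records how deeply a span is nested under its root trace, and execution_order numbers spans by start order within that root. A minimal, self-contained sketch of this bookkeeping pattern (illustrative names only, not the Parea SDK's API; span teardown is omitted):

import contextvars
import uuid

trace_context = contextvars.ContextVar("trace_context", default=[])
execution_order_counters = contextvars.ContextVar("execution_order_counters", default={})

def open_span() -> dict:
    # Push a new trace id onto the context stack.
    trace_id = str(uuid.uuid4())
    new_trace_context = trace_context.get() + [trace_id]
    trace_context.set(new_trace_context)

    depth = len(new_trace_context) - 1  # 0 for a root span
    root_trace_id = new_trace_context[0]

    # Bump the per-root counter to get this span's start order.
    counters = execution_order_counters.get()
    if root_trace_id not in counters:
        counters[root_trace_id] = 0
    execution_order = counters[root_trace_id]
    counters[root_trace_id] += 1

    return {"trace_id": trace_id, "depth": depth, "execution_order": execution_order}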
1 change: 1 addition & 0 deletions parea/wrapper/anthropic/anthropic.py
@@ -34,6 +34,7 @@ def init(self, log: Callable, cache: Cache, client: Client):
             convert_kwargs_to_cache_request=self.convert_kwargs_to_cache_request,
             convert_cache_to_response=self.convert_cache_to_response,
             aconvert_cache_to_response=self.aconvert_cache_to_response,
+            name="llm-anthropic",
         )

     @staticmethod
1 change: 1 addition & 0 deletions parea/wrapper/openai/openai.py
@@ -73,6 +73,7 @@ def init(self, log: Callable, cache: Cache = None, module_client=openai):
             convert_kwargs_to_cache_request=self.convert_kwargs_to_cache_request,
             convert_cache_to_response=self.convert_cache_to_response,
             aconvert_cache_to_response=self.aconvert_cache_to_response,
+            name="llm-openai",
         )

     def resolver(self, trace_id: str, _args: Sequence[Any], kwargs: Dict[str, Any], response: Optional[Any]) -> Optional[Any]:
11 changes: 9 additions & 2 deletions parea/wrapper/utils.py
@@ -198,7 +198,11 @@ def clean_json_string(s):
     try:
         function_args = json.loads(body.arguments)
     except json.decoder.JSONDecodeError:
-        function_args = json.loads(clean_json_string(body.arguments))
+        try:
+            function_args = json.loads(clean_json_string(body.arguments))
+        except Exception:
+            function_args = str(body.arguments)

     if is_tool_call:
         calls.append(
             {
@@ -325,7 +329,10 @@ def _process_stream_response(content: list, tools: dict, data: dict, trace_id: s

     tool_calls = [t["function"] for t in tools.values()]
     for tool in tool_calls:
-        tool["arguments"] = json.loads(tool["arguments"])
+        try:
+            tool["arguments"] = json.loads(tool["arguments"])
+        except Exception:
+            tool["arguments"] = str(tool["arguments"])

     completion = final_content or json_dumps(tool_calls, indent=4)
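
Both hunks apply the same rule: malformed tool-call arguments should degrade to a string rather than raise inside the logging path. A standalone sketch of that fallback chain (with a hypothetical stand-in for the real clean_json_string helper):

import json

def _clean_json_string(s: str) -> str:
    # Hypothetical stand-in for parea's clean_json_string helper.
    return s.strip().rstrip(",")

def parse_tool_arguments(raw: str):
    # Degrade gracefully: strict JSON, then cleaned JSON, then the raw string.
    try:
        return json.loads(raw)
    except json.decoder.JSONDecodeError:
        try:
            return json.loads(_clean_json_string(raw))
        except Exception:
            return str(raw)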
29 changes: 22 additions & 7 deletions parea/wrapper/wrapper.py
@@ -13,7 +13,7 @@
 from parea.evals.utils import _make_evaluations
 from parea.helpers import is_logging_disabled, timezone_aware_now
 from parea.schemas.models import TraceLog, UpdateTraceScenario
-from parea.utils.trace_utils import call_eval_funcs_then_log, fill_trace_data, trace_context, trace_data
+from parea.utils.trace_utils import call_eval_funcs_then_log, execution_order_counters, fill_trace_data, trace_context, trace_data
 from parea.wrapper.utils import safe_format_template_to_prompt, skip_decorator_if_func_in_stack

 logger = logging.getLogger()
@@ -33,6 +33,7 @@ def __init__(
         convert_cache_to_response: Callable,
         aconvert_cache_to_response: Callable,
         log: Callable,
+        name: str = "LLM",
     ) -> None:
         self.resolver = resolver
         self.gen_resolver = gen_resolver
@@ -44,6 +45,7 @@
         self.convert_kwargs_to_cache_request = convert_kwargs_to_cache_request
         self.convert_cache_to_response = convert_cache_to_response
         self.aconvert_cache_to_response = aconvert_cache_to_response
+        self.name = name

     def wrap_functions(self, module: Any, func_names: List[str]):
         for func_name in func_names:
@@ -79,27 +81,40 @@ def _init_trace(self, kwargs) -> Tuple[str, datetime, contextvars.Token]:
         new_trace_context = trace_context.get() + [trace_id]
         token = trace_context.set(new_trace_context)

+        if is_logging_disabled():
+            return trace_id, start_time, token
+
         if template_inputs := kwargs.pop("template_inputs", None):
-            for m in kwargs["messages"] or []:
+            for m in kwargs.get("messages", []):
                 if isinstance(m, dict) and "content" in m:
                     m["content"] = safe_format_template_to_prompt(m["content"], **template_inputs)

-        if is_logging_disabled():
-            return trace_id, start_time, token
+        depth = len(new_trace_context) - 1
+        root_trace_id = new_trace_context[0]
+
+        # Get the execution order counter for the current root trace
+        counters = execution_order_counters.get()
+        if root_trace_id not in counters:
+            counters[root_trace_id] = 0
+        execution_order = counters[root_trace_id]
+        counters[root_trace_id] += 1

         try:
             trace_data.get()[trace_id] = TraceLog(
                 trace_id=trace_id,
-                parent_trace_id=new_trace_context[0],
-                root_trace_id=new_trace_context[0],
+                parent_trace_id=root_trace_id,
+                root_trace_id=root_trace_id,
                 start_timestamp=start_time.isoformat(),
-                trace_name="LLM",
+                trace_name=self.name,
                 end_user_identifier=None,
                 session_id=None,
                 metadata=None,
                 target=None,
                 tags=None,
                 inputs=template_inputs,
                 experiment_uuid=os.getenv(PAREA_OS_ENV_EXPERIMENT_UUID, None),
+                depth=depth,
+                execution_order=execution_order,
             )

             parent_trace_id = new_trace_context[-2] if len(new_trace_context) > 1 else None
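
Together with the depth and execution_order fields added above, the provider-specific span names make a trace renderable as a tree. A rough illustration with made-up span data (the names mirror files in this diff; this is not actual SDK output):

spans = [
    {"trace_name": "argument_chain", "depth": 0, "execution_order": 0},
    {"trace_name": "llm-openai", "depth": 1, "execution_order": 1},
    {"trace_name": "refiner", "depth": 1, "execution_order": 2},
    {"trace_name": "llm-openai", "depth": 2, "execution_order": 3},
]

# Sorting by execution_order recovers start order; indenting by depth
# recovers nesting: the two new fields are sufficient to draw the tree.
for span in sorted(spans, key=lambda s: s["execution_order"]):
    print("    " * span["depth"] + span["trace_name"])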
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -6,7 +6,7 @@ build-backend = "poetry.core.masonry.api"
 [tool.poetry]
 name = "parea-ai"
 packages = [{ include = "parea" }]
-version = "0.2.157"
+version = "0.2.158"
 description = "Parea python sdk"
 readme = "README.md"
 authors = ["joel-parea-ai <[email protected]>"]
