From 583e790065ebd9cc963e068430b7e83e9873b64e Mon Sep 17 00:00:00 2001 From: Joschka Braun Date: Mon, 1 Jul 2024 09:43:49 -0400 Subject: [PATCH 1/4] docs: readme update --- README.md | 188 ++++++++++++++++++++++++++---------------------------- 1 file changed, 91 insertions(+), 97 deletions(-) diff --git a/README.md b/README.md index f19ade31..6a5566e4 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Parea Python SDK +# Evaluate Your AI Application with Parea's Python SDK
@@ -8,12 +8,12 @@ [![Pre-commit](https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white)](https://github.com/parea-ai/parea-sdk/blob/master/.pre-commit-config.yaml) [![Semantic Versions](https://img.shields.io/badge/%20%20%F0%9F%93%A6%F0%9F%9A%80-semantic--versions-e10079.svg)](https://github.com/parea-ai/parea-sdk/releases) -[![License](https://img.shields.io/github/license/parea-ai/parea-sdk)](https://github.com/parea-ai/parea-sdk/blob/master/LICENSE) - -Parea python sdk +[![License](https://img.shields.io/github/license/parea-ai/parea-sdk)](https://github.com/parea-ai/parea-sdk/blob/main/LICENSE)
+[Parea AI](https://www.parea.ai) provides an SDK to evaluate & monitor your AI applications. + [Python SDK Docs](https://docs.parea.ai/api-reference/sdk/python) ## Installation @@ -28,6 +28,54 @@ or install with `Poetry` poetry add parea-ai ``` + +## Evaluating Your LLM App + +Testing your AI app means executing it over a dataset and scoring it with an evaluation function. +This is done in Parea by defining & running experiments. +Below you can see an example of how to test a greeting bot with the Levenshtein distance metric. + +```python +from parea import Parea, trace +from parea.evals.general import levenshtein + +p = Parea(api_key="<>") # replace with Parea AI API key + +# use the trace decorator to score the output with the Levenshtein distance +@trace(eval_funcs=[levenshtein]) +def greeting(name: str) -> str: + return f"Hello {name}" + +data = [ + {"name": "Foo", "target": "Hi Foo"}, + {"name": "Bar", "target": "Hello Bar"}, +] + +p.experiment( + name="Greeting", + data=data, + func=greeting, +).run() +``` + +In the snippet above, we used the `trace` decorator to capture any inputs & outputs of the function. +This decorator also enables scoring the output by executing the `levenshtein` eval in the background. +Then, we defined an experiment via `p.experiment` to evaluate our function (`greeting`) over a dataset (here a list of dictionaries). +Calling `run` will execute the experiment, and create a report of outputs, scores & traces for each sample of the dataset. +You can find a link to the executed experiment [here](). (todo: fill-in experiment) + + + +### More Resources + +Read more about how to run & analyze experiments. + +### Running Evals + + +### Writing Evals + + ## Evaluating Your LLM App  You can evaluate any step of your LLM app by wrapping it with a decorator, called `trace`, and specifying the evaluation function(s). The scores associated with the traces will be logged to the Parea [dashboard](https://app.parea.ai/logs) and/or in a local CSV file if you don't have a Parea API key.  Evaluation functions receive an argument `log` (of type [Log](parea/schemas/models.py)) and should return a float. You don't need to start from scratch, there are pre-defined evaluation functions for [general purpose](parea/evals/general), [chat](parea/evals/chat), [RAG](parea/evals/rag), and [summarization](parea/evals/summary) apps :)  You can define evaluation functions locally or use the ones you have deployed to Parea's [Test Hub](https://app.parea.ai/test-hub). If you choose the latter option, the evaluation happens asynchronously and non-blocking. -A fully locally working cookbook can be found [here](parea/cookbook/openai/tracing_and_evaluating_openai_endpoint.py). +A fully locally working cookbook can be found [here](cookbook/openai/tracing_and_evaluating_openai_endpoint.py). Alternatively, you can add the following code to your codebase to get started: ```python import os from parea import Parea, InMemoryCache, trace from parea.schemas.log import Log  Parea(api_key=os.getenv("PAREA_API_KEY"), cache=InMemoryCache()) # use InMemoryCache if you don't have a Parea API key   def locally_defined_eval_function(log: Log) -> float: ...   @trace(eval_func_names=['deployed_eval_function_name'], eval_funcs=[locally_defined_eval_function]) def function_to_evaluate(*args, **kwargs) -> ...: ... ``` -### Run Experiments - -You can run an experiment for your LLM application by defining the `Experiment` class and passing it the name, the data and the -function you want to run. You need annotate the function with the `trace` decorator to trace its inputs, outputs, latency, etc. -as well as to specify which evaluation functions should be applied to it (as shown above). -```python -from parea import Experiment -Experiment( - name="Experiment Name", # Name of the experiment (str) - data=[{"n": "10"}], # Data to run the experiment on (list of dicts) - func=function_to_evaluate, # Function to run (callable) -) -``` - -Then you can run the experiment by using the `experiment` command and give it the path to the python file. -This will run your experiment with the specified inputs and create a report with the results which can be viewed under -the [Experiments tab](https://app.parea.ai/experiments). - -```bash -parea experiment -``` - -Full working example in our [docs](https://docs.parea.ai/evaluation/offline/experiments). 
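The `eval_funcs` hook used with `trace` above also accepts your own evaluation functions. Below is a minimal sketch of a custom eval; it assumes, as the built-in `levenshtein` eval implies, that the `Log` object passed to an eval exposes `output` and `target` attributes and that the eval returns a float score.

```python
from parea import Parea, trace
from parea.schemas.log import Log

p = Parea(api_key="<>")  # replace with Parea AI API key


# A custom eval is just a function from a Log to a float score.
# Assumption: Log exposes `output` and `target`, mirroring the dataset fields above.
def exact_match(log: Log) -> float:
    return float(log.output == log.target)


# Pass the custom eval to the trace decorator like any built-in eval.
@trace(eval_funcs=[exact_match])
def greeting(name: str) -> str:
    return f"Hello {name}"
```

A custom eval like this can also be combined with deployed evals via `eval_func_names`, as shown in the snippet above.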
- -## Debugging Chains & Agents - -You can iterate on your chains & agents much faster by using a local cache. This will allow you to make changes to your -code & prompts without waiting for all previous, valid LLM responses. Simply add these two lines to the beginning your -code and start -[a local redis cache](https://redis.io/docs/getting-started/install-stack/): - -```python -from parea import Parea, InMemoryCache - -Parea(cache=InMemoryCache()) -``` - -If you set `cache = None` for `Parea`, no cache will be used. - -### Benchmark your LLM app across many inputs - -You can benchmark your LLM app across many inputs by using the `benchmark` command. This will run your the entry point -of your app with the specified inputs and create a report with the results. - -```bash -parea benchmark --func app:main --csv_path benchmark.csv -``` - -The CSV file will be used to fill in the arguments to your function. The report will be a CSV file of all the traces. If -you -set your Parea API key, the traces will also be logged to the Parea dashboard. Note, for this feature you need to have a -redis cache running. Please, raise a GitHub issue if you would like to use this feature without a redis cache. +## Logging & Observability ### Automatically log all your LLM call traces @@ -136,7 +133,42 @@ Parea( ) ``` -## Use a deployed prompt +### Logging results from LLM providers [Example] + +```python +import os + +import openai +from dotenv import load_dotenv + +from parea import Parea + +load_dotenv() + +openai.api_key = os.getenv("OPENAI_API_KEY") + +p = Parea(api_key=os.getenv("PAREA_API_KEY")) + +x = "Golang" +y = "Fiber" +messages = [{ + "role": "user", + "content": f"Write a hello world program using {x} and the {y} framework." +}] +model = "gpt-3.5-turbo" +temperature = 0.0 + + +# define your OpenAI call as you would normally and we'll automatically log the results +def main(): + openai.chat.completions.create(model=model, temperature=temperature, messages=messages).choices[0].message.content +``` + +## Deploying Prompts + +Deployed prompts enable collaboration with non-engineers such as product managers & subject-matter experts. +Users can iterate, refine & test prompts on Parea's playground. +After tinkering, you can deploy that prompt which means that it is exposed via an API endpoint to integrate it into your application. ```python import os @@ -188,45 +220,7 @@ async def main_async(): print(deployed_prompt) ``` -### Logging results from LLM providers [Example] - -```python -import os - -import openai -from dotenv import load_dotenv - -from parea import Parea - -load_dotenv() - -openai.api_key = os.getenv("OPENAI_API_KEY") - -p = Parea(api_key=os.getenv("PAREA_API_KEY")) - -x = "Golang" -y = "Fiber" -messages = [{ - "role": "user", - "content": f"Write a hello world program using {x} and the {y} framework." -}] -model = "gpt-3.5-turbo" -temperature = 0.0 - - -# define your OpenAI call as you would normally and we'll automatically log the results -def main(): - openai.chat.completions.create(model=model, temperature=temperature, messages=messages).choices[0].message.content -``` - -### Open source community features - -Ready-to-use [Pull Requests templates](https://github.com/parea-ai/parea-sdk/blob/master/.github/PULL_REQUEST_TEMPLATE.md) -and several [Issue templates](https://github.com/parea-ai/parea-sdk/tree/master/.github/ISSUE_TEMPLATE). -- Files such as: `LICENSE`, `CONTRIBUTING.md`, `CODE_OF_CONDUCT.md`, and `SECURITY.md` are generated automatically. 
-- [Semantic Versions](https://semver.org/) specification - with [`Release Drafter`](https://github.com/marketplace/actions/release-drafter). ## 🛡 License @@ -239,7 +233,7 @@ See [LICENSE](https://github.com/parea-ai/parea-sdk/blob/master/LICENSE) for mor ```bibtex @misc{parea-sdk, - author = {joel-parea-ai}, + author = {joel-parea-ai,joschkabraun}, title = {Parea python sdk}, year = {2023}, publisher = {GitHub}, From 30b4b2af9d686ba711044c21b2c78cffb2ab91c1 Mon Sep 17 00:00:00 2001 From: Joschka Braun Date: Mon, 1 Jul 2024 09:44:05 -0400 Subject: [PATCH 2/4] refactor: move examples --- .gitignore | 2 +- .../anthropic/tracing_anthropic.py | 0 .../anthropic/tracing_anthropic_tool_use.py | 2 +- .../tracing_with_images_anthropic.py | 0 .../assets/data/2022-letter.txt | 0 .../assets/data}/__init__.py | 0 .../data/anthropic_tool_use_examples.py | 0 .../assets/data/openai_input_examples.py | 0 .../assets/data/state_of_the_union.txt | 0 .../assets/img/dashboard.png | Bin .../assets/img/dashboard_detailed_view.png | Bin .../assets/img/deployed_prompts.png | Bin .../assets/img/feedback.png | Bin .../cookbook => cookbook}/assets/img/logs.png | Bin .../assets/img/meta_data.png | Bin .../assets/img/trace_log_view.png | Bin .../dspy/dspy_examples.py | 0 .../dspy/dspy_threading.py | 0 .../tracing_and_evaluation_tutorial.ipynb | 0 .../endpoints_for_datasets.py | 0 .../RAG_experiment_with_auto_evals.py | 0 .../async_experiments.py | 0 .../deployed_prompt_and_dataset.py | 0 .../deployed_prompt_dataset_and_eval.py | 0 .../experiment_test_substeps.py | 0 .../evals_and_experiments/list_experiments.py | 0 .../modify_dataset_before_experiment.py | 0 .../evals_and_experiments/run_experiment.py | 0 .../run_experiment_balanced_acc.py | 0 .../run_experiment_evas_with_reason.py | 0 ..._experiment_using_saved_test_collection.py | 0 .../guidance/tracing_guidance.py | 0 .../instructor_blog_example_simple.py | 0 ...tructor_blog_example_validation_context.py | 0 .../instructor/instructor_evals.py | 0 .../instructor/instructor_streaming.py | 0 .../langchain/trace_class_call_method.py | 0 .../langchain/trace_langchain_RAG_evals.py | 0 .../trace_langchain_RAG_with_experiment.py | 0 ...ce_langchain_anthropic_function_calling.py | 0 ...ace_langchain_azure_RAG_with_experiment.py | 0 .../langchain/trace_langchain_bedrock_rag.py | 0 .../trace_langchain_inside_trace_decorator.py | 0 .../langchain/trace_langchain_rag_agents.py | 0 .../trace_langchain_rag_question_answering.py | 0 .../langchain/trace_langchain_simple.py | 0 .../trace_langchain_with_deployed_prompt.py | 0 .../marvin/trace_marvin.py | 0 .../dynamic_few_shot_injection_with_evals.py | 0 .../openai/simple_experiment_with_openai.py | 0 .../openai/trace_class_call_method.py | 0 .../tracing_and_evaluating_openai_endpoint.py | 0 .../openai/tracing_azure_open_ai.py | 2 +- .../openai/tracing_open_ai_streams.py | 2 +- .../tracing_openai_assistant_endpoint.py | 0 .../openai/tracing_templated_llm_calls.py | 0 .../openai/tracing_tool_calling.py | 0 .../openai/tracing_with_images_open_ai.py | 0 .../tracing_with_open_ai_endpoint_directly.py | 0 .../tracing_with_openai_requests_api.py | 2 +- .../tracing_with_openai_with_functions.py | 0 .../fetching_and_using_parea_deployments.py | 0 .../tracing_with_deployed_prompt.py | 0 .../dynamic_few_shot_injection.py | 0 .../tracing_with_Parea_sdk.ipynb | 0 .../parea_llm_proxy/tracing_with_agent.py | 0 ...ing_with_function_calling_and_chains.ipynb | 0 .../tracing_with_parea_streaming.py | 0 .../tracing_without_deployed_prompt.py | 0 
.../tracing_with_threading.py | 0 .../use_dataset_for_finetuning.py | 0 docker/Dockerfile | 25 ---------- docker/README.md | 47 ------------------ parea/cookbook/assets/data/__init__.py | 0 parea/cookbook/parea_llm_proxy/__init__.py | 0 .../parea_llm_proxy/deployments/__init__.py | 0 76 files changed, 5 insertions(+), 77 deletions(-) rename {parea/cookbook => cookbook}/anthropic/tracing_anthropic.py (100%) rename {parea/cookbook => cookbook}/anthropic/tracing_anthropic_tool_use.py (87%) rename {parea/cookbook => cookbook}/anthropic/tracing_with_images_anthropic.py (100%) rename {parea/cookbook => cookbook}/assets/data/2022-letter.txt (100%) rename {parea/cookbook => cookbook/assets/data}/__init__.py (100%) rename {parea/cookbook => cookbook}/assets/data/anthropic_tool_use_examples.py (100%) rename {parea/cookbook => cookbook}/assets/data/openai_input_examples.py (100%) rename {parea/cookbook => cookbook}/assets/data/state_of_the_union.txt (100%) rename {parea/cookbook => cookbook}/assets/img/dashboard.png (100%) rename {parea/cookbook => cookbook}/assets/img/dashboard_detailed_view.png (100%) rename {parea/cookbook => cookbook}/assets/img/deployed_prompts.png (100%) rename {parea/cookbook => cookbook}/assets/img/feedback.png (100%) rename {parea/cookbook => cookbook}/assets/img/logs.png (100%) rename {parea/cookbook => cookbook}/assets/img/meta_data.png (100%) rename {parea/cookbook => cookbook}/assets/img/trace_log_view.png (100%) rename {parea/cookbook => cookbook}/dspy/dspy_examples.py (100%) rename {parea/cookbook => cookbook}/dspy/dspy_threading.py (100%) rename {parea/cookbook => cookbook}/dspy/tracing_and_evaluation_tutorial.ipynb (100%) rename parea/cookbook/enpoints_for_datasets.py => cookbook/endpoints_for_datasets.py (100%) rename {parea/cookbook => cookbook}/evals_and_experiments/RAG_experiment_with_auto_evals.py (100%) rename {parea/cookbook => cookbook}/evals_and_experiments/async_experiments.py (100%) rename {parea/cookbook => cookbook}/evals_and_experiments/deployed_prompt_and_dataset.py (100%) rename {parea/cookbook => cookbook}/evals_and_experiments/deployed_prompt_dataset_and_eval.py (100%) rename {parea/cookbook => cookbook}/evals_and_experiments/experiment_test_substeps.py (100%) rename {parea/cookbook => cookbook}/evals_and_experiments/list_experiments.py (100%) rename {parea/cookbook => cookbook}/evals_and_experiments/modify_dataset_before_experiment.py (100%) rename {parea/cookbook => cookbook}/evals_and_experiments/run_experiment.py (100%) rename {parea/cookbook => cookbook}/evals_and_experiments/run_experiment_balanced_acc.py (100%) rename {parea/cookbook => cookbook}/evals_and_experiments/run_experiment_evas_with_reason.py (100%) rename {parea/cookbook => cookbook}/evals_and_experiments/run_experiment_using_saved_test_collection.py (100%) rename {parea/cookbook => cookbook}/guidance/tracing_guidance.py (100%) rename {parea/cookbook => cookbook}/instructor/instructor_blog_example_simple.py (100%) rename {parea/cookbook => cookbook}/instructor/instructor_blog_example_validation_context.py (100%) rename {parea/cookbook => cookbook}/instructor/instructor_evals.py (100%) rename {parea/cookbook => cookbook}/instructor/instructor_streaming.py (100%) rename {parea/cookbook => cookbook}/langchain/trace_class_call_method.py (100%) rename {parea/cookbook => cookbook}/langchain/trace_langchain_RAG_evals.py (100%) rename {parea/cookbook => cookbook}/langchain/trace_langchain_RAG_with_experiment.py (100%) rename {parea/cookbook => 
cookbook}/langchain/trace_langchain_anthropic_function_calling.py (100%) rename {parea/cookbook => cookbook}/langchain/trace_langchain_azure_RAG_with_experiment.py (100%) rename {parea/cookbook => cookbook}/langchain/trace_langchain_bedrock_rag.py (100%) rename {parea/cookbook => cookbook}/langchain/trace_langchain_inside_trace_decorator.py (100%) rename {parea/cookbook => cookbook}/langchain/trace_langchain_rag_agents.py (100%) rename {parea/cookbook => cookbook}/langchain/trace_langchain_rag_question_answering.py (100%) rename {parea/cookbook => cookbook}/langchain/trace_langchain_simple.py (100%) rename {parea/cookbook => cookbook}/langchain/trace_langchain_with_deployed_prompt.py (100%) rename {parea/cookbook => cookbook}/marvin/trace_marvin.py (100%) rename {parea/cookbook => cookbook}/openai/dynamic_few_shot_injection_with_evals.py (100%) rename {parea/cookbook => cookbook}/openai/simple_experiment_with_openai.py (100%) rename {parea/cookbook => cookbook}/openai/trace_class_call_method.py (100%) rename {parea/cookbook => cookbook}/openai/tracing_and_evaluating_openai_endpoint.py (100%) rename {parea/cookbook => cookbook}/openai/tracing_azure_open_ai.py (94%) rename {parea/cookbook => cookbook}/openai/tracing_open_ai_streams.py (91%) rename {parea/cookbook => cookbook}/openai/tracing_openai_assistant_endpoint.py (100%) rename {parea/cookbook => cookbook}/openai/tracing_templated_llm_calls.py (100%) rename {parea/cookbook => cookbook}/openai/tracing_tool_calling.py (100%) rename {parea/cookbook => cookbook}/openai/tracing_with_images_open_ai.py (100%) rename {parea/cookbook => cookbook}/openai/tracing_with_open_ai_endpoint_directly.py (100%) rename {parea/cookbook => cookbook}/openai/tracing_with_openai_requests_api.py (95%) rename {parea/cookbook => cookbook}/openai/tracing_with_openai_with_functions.py (100%) rename {parea/cookbook => cookbook}/parea_llm_proxy/deployments/fetching_and_using_parea_deployments.py (100%) rename {parea/cookbook => cookbook}/parea_llm_proxy/deployments/tracing_with_deployed_prompt.py (100%) rename {parea/cookbook => cookbook}/parea_llm_proxy/dynamic_few_shot_injection.py (100%) rename {parea/cookbook => cookbook}/parea_llm_proxy/tracing_with_Parea_sdk.ipynb (100%) rename {parea/cookbook => cookbook}/parea_llm_proxy/tracing_with_agent.py (100%) rename {parea/cookbook => cookbook}/parea_llm_proxy/tracing_with_function_calling_and_chains.ipynb (100%) rename {parea/cookbook => cookbook}/parea_llm_proxy/tracing_with_parea_streaming.py (100%) rename {parea/cookbook => cookbook}/parea_llm_proxy/tracing_without_deployed_prompt.py (100%) rename {parea/cookbook => cookbook}/tracing_with_threading.py (100%) rename {parea/cookbook => cookbook}/use_dataset_for_finetuning.py (100%) delete mode 100644 docker/Dockerfile delete mode 100644 docker/README.md delete mode 100644 parea/cookbook/assets/data/__init__.py delete mode 100644 parea/cookbook/parea_llm_proxy/__init__.py delete mode 100644 parea/cookbook/parea_llm_proxy/deployments/__init__.py diff --git a/.gitignore b/.gitignore index 50c275c3..cdfecf56 100644 --- a/.gitignore +++ b/.gitignore @@ -619,4 +619,4 @@ MigrationBackup/ /LocalREADME.md LocalREADME.md .env -/parea/cookbook/tmp/ +/cookbook/tmp/ diff --git a/parea/cookbook/anthropic/tracing_anthropic.py b/cookbook/anthropic/tracing_anthropic.py similarity index 100% rename from parea/cookbook/anthropic/tracing_anthropic.py rename to cookbook/anthropic/tracing_anthropic.py diff --git a/parea/cookbook/anthropic/tracing_anthropic_tool_use.py 
b/cookbook/anthropic/tracing_anthropic_tool_use.py similarity index 87% rename from parea/cookbook/anthropic/tracing_anthropic_tool_use.py rename to cookbook/anthropic/tracing_anthropic_tool_use.py index f9466137..e90a5ff4 100644 --- a/parea/cookbook/anthropic/tracing_anthropic_tool_use.py +++ b/cookbook/anthropic/tracing_anthropic_tool_use.py @@ -4,7 +4,7 @@ from dotenv import load_dotenv from parea import Parea -from parea.cookbook.assets.data.anthropic_tool_use_examples import missing_information, multiple_tool_use, single_tool_use +from cookbook.assets.data.anthropic_tool_use_examples import missing_information, multiple_tool_use, single_tool_use load_dotenv() diff --git a/parea/cookbook/anthropic/tracing_with_images_anthropic.py b/cookbook/anthropic/tracing_with_images_anthropic.py similarity index 100% rename from parea/cookbook/anthropic/tracing_with_images_anthropic.py rename to cookbook/anthropic/tracing_with_images_anthropic.py diff --git a/parea/cookbook/assets/data/2022-letter.txt b/cookbook/assets/data/2022-letter.txt similarity index 100% rename from parea/cookbook/assets/data/2022-letter.txt rename to cookbook/assets/data/2022-letter.txt diff --git a/parea/cookbook/__init__.py b/cookbook/assets/data/__init__.py similarity index 100% rename from parea/cookbook/__init__.py rename to cookbook/assets/data/__init__.py diff --git a/parea/cookbook/assets/data/anthropic_tool_use_examples.py b/cookbook/assets/data/anthropic_tool_use_examples.py similarity index 100% rename from parea/cookbook/assets/data/anthropic_tool_use_examples.py rename to cookbook/assets/data/anthropic_tool_use_examples.py diff --git a/parea/cookbook/assets/data/openai_input_examples.py b/cookbook/assets/data/openai_input_examples.py similarity index 100% rename from parea/cookbook/assets/data/openai_input_examples.py rename to cookbook/assets/data/openai_input_examples.py diff --git a/parea/cookbook/assets/data/state_of_the_union.txt b/cookbook/assets/data/state_of_the_union.txt similarity index 100% rename from parea/cookbook/assets/data/state_of_the_union.txt rename to cookbook/assets/data/state_of_the_union.txt diff --git a/parea/cookbook/assets/img/dashboard.png b/cookbook/assets/img/dashboard.png similarity index 100% rename from parea/cookbook/assets/img/dashboard.png rename to cookbook/assets/img/dashboard.png diff --git a/parea/cookbook/assets/img/dashboard_detailed_view.png b/cookbook/assets/img/dashboard_detailed_view.png similarity index 100% rename from parea/cookbook/assets/img/dashboard_detailed_view.png rename to cookbook/assets/img/dashboard_detailed_view.png diff --git a/parea/cookbook/assets/img/deployed_prompts.png b/cookbook/assets/img/deployed_prompts.png similarity index 100% rename from parea/cookbook/assets/img/deployed_prompts.png rename to cookbook/assets/img/deployed_prompts.png diff --git a/parea/cookbook/assets/img/feedback.png b/cookbook/assets/img/feedback.png similarity index 100% rename from parea/cookbook/assets/img/feedback.png rename to cookbook/assets/img/feedback.png diff --git a/parea/cookbook/assets/img/logs.png b/cookbook/assets/img/logs.png similarity index 100% rename from parea/cookbook/assets/img/logs.png rename to cookbook/assets/img/logs.png diff --git a/parea/cookbook/assets/img/meta_data.png b/cookbook/assets/img/meta_data.png similarity index 100% rename from parea/cookbook/assets/img/meta_data.png rename to cookbook/assets/img/meta_data.png diff --git a/parea/cookbook/assets/img/trace_log_view.png b/cookbook/assets/img/trace_log_view.png similarity index 100% 
rename from parea/cookbook/assets/img/trace_log_view.png rename to cookbook/assets/img/trace_log_view.png diff --git a/parea/cookbook/dspy/dspy_examples.py b/cookbook/dspy/dspy_examples.py similarity index 100% rename from parea/cookbook/dspy/dspy_examples.py rename to cookbook/dspy/dspy_examples.py diff --git a/parea/cookbook/dspy/dspy_threading.py b/cookbook/dspy/dspy_threading.py similarity index 100% rename from parea/cookbook/dspy/dspy_threading.py rename to cookbook/dspy/dspy_threading.py diff --git a/parea/cookbook/dspy/tracing_and_evaluation_tutorial.ipynb b/cookbook/dspy/tracing_and_evaluation_tutorial.ipynb similarity index 100% rename from parea/cookbook/dspy/tracing_and_evaluation_tutorial.ipynb rename to cookbook/dspy/tracing_and_evaluation_tutorial.ipynb diff --git a/parea/cookbook/enpoints_for_datasets.py b/cookbook/endpoints_for_datasets.py similarity index 100% rename from parea/cookbook/enpoints_for_datasets.py rename to cookbook/endpoints_for_datasets.py diff --git a/parea/cookbook/evals_and_experiments/RAG_experiment_with_auto_evals.py b/cookbook/evals_and_experiments/RAG_experiment_with_auto_evals.py similarity index 100% rename from parea/cookbook/evals_and_experiments/RAG_experiment_with_auto_evals.py rename to cookbook/evals_and_experiments/RAG_experiment_with_auto_evals.py diff --git a/parea/cookbook/evals_and_experiments/async_experiments.py b/cookbook/evals_and_experiments/async_experiments.py similarity index 100% rename from parea/cookbook/evals_and_experiments/async_experiments.py rename to cookbook/evals_and_experiments/async_experiments.py diff --git a/parea/cookbook/evals_and_experiments/deployed_prompt_and_dataset.py b/cookbook/evals_and_experiments/deployed_prompt_and_dataset.py similarity index 100% rename from parea/cookbook/evals_and_experiments/deployed_prompt_and_dataset.py rename to cookbook/evals_and_experiments/deployed_prompt_and_dataset.py diff --git a/parea/cookbook/evals_and_experiments/deployed_prompt_dataset_and_eval.py b/cookbook/evals_and_experiments/deployed_prompt_dataset_and_eval.py similarity index 100% rename from parea/cookbook/evals_and_experiments/deployed_prompt_dataset_and_eval.py rename to cookbook/evals_and_experiments/deployed_prompt_dataset_and_eval.py diff --git a/parea/cookbook/evals_and_experiments/experiment_test_substeps.py b/cookbook/evals_and_experiments/experiment_test_substeps.py similarity index 100% rename from parea/cookbook/evals_and_experiments/experiment_test_substeps.py rename to cookbook/evals_and_experiments/experiment_test_substeps.py diff --git a/parea/cookbook/evals_and_experiments/list_experiments.py b/cookbook/evals_and_experiments/list_experiments.py similarity index 100% rename from parea/cookbook/evals_and_experiments/list_experiments.py rename to cookbook/evals_and_experiments/list_experiments.py diff --git a/parea/cookbook/evals_and_experiments/modify_dataset_before_experiment.py b/cookbook/evals_and_experiments/modify_dataset_before_experiment.py similarity index 100% rename from parea/cookbook/evals_and_experiments/modify_dataset_before_experiment.py rename to cookbook/evals_and_experiments/modify_dataset_before_experiment.py diff --git a/parea/cookbook/evals_and_experiments/run_experiment.py b/cookbook/evals_and_experiments/run_experiment.py similarity index 100% rename from parea/cookbook/evals_and_experiments/run_experiment.py rename to cookbook/evals_and_experiments/run_experiment.py diff --git a/parea/cookbook/evals_and_experiments/run_experiment_balanced_acc.py 
b/cookbook/evals_and_experiments/run_experiment_balanced_acc.py similarity index 100% rename from parea/cookbook/evals_and_experiments/run_experiment_balanced_acc.py rename to cookbook/evals_and_experiments/run_experiment_balanced_acc.py diff --git a/parea/cookbook/evals_and_experiments/run_experiment_evas_with_reason.py b/cookbook/evals_and_experiments/run_experiment_evas_with_reason.py similarity index 100% rename from parea/cookbook/evals_and_experiments/run_experiment_evas_with_reason.py rename to cookbook/evals_and_experiments/run_experiment_evas_with_reason.py diff --git a/parea/cookbook/evals_and_experiments/run_experiment_using_saved_test_collection.py b/cookbook/evals_and_experiments/run_experiment_using_saved_test_collection.py similarity index 100% rename from parea/cookbook/evals_and_experiments/run_experiment_using_saved_test_collection.py rename to cookbook/evals_and_experiments/run_experiment_using_saved_test_collection.py diff --git a/parea/cookbook/guidance/tracing_guidance.py b/cookbook/guidance/tracing_guidance.py similarity index 100% rename from parea/cookbook/guidance/tracing_guidance.py rename to cookbook/guidance/tracing_guidance.py diff --git a/parea/cookbook/instructor/instructor_blog_example_simple.py b/cookbook/instructor/instructor_blog_example_simple.py similarity index 100% rename from parea/cookbook/instructor/instructor_blog_example_simple.py rename to cookbook/instructor/instructor_blog_example_simple.py diff --git a/parea/cookbook/instructor/instructor_blog_example_validation_context.py b/cookbook/instructor/instructor_blog_example_validation_context.py similarity index 100% rename from parea/cookbook/instructor/instructor_blog_example_validation_context.py rename to cookbook/instructor/instructor_blog_example_validation_context.py diff --git a/parea/cookbook/instructor/instructor_evals.py b/cookbook/instructor/instructor_evals.py similarity index 100% rename from parea/cookbook/instructor/instructor_evals.py rename to cookbook/instructor/instructor_evals.py diff --git a/parea/cookbook/instructor/instructor_streaming.py b/cookbook/instructor/instructor_streaming.py similarity index 100% rename from parea/cookbook/instructor/instructor_streaming.py rename to cookbook/instructor/instructor_streaming.py diff --git a/parea/cookbook/langchain/trace_class_call_method.py b/cookbook/langchain/trace_class_call_method.py similarity index 100% rename from parea/cookbook/langchain/trace_class_call_method.py rename to cookbook/langchain/trace_class_call_method.py diff --git a/parea/cookbook/langchain/trace_langchain_RAG_evals.py b/cookbook/langchain/trace_langchain_RAG_evals.py similarity index 100% rename from parea/cookbook/langchain/trace_langchain_RAG_evals.py rename to cookbook/langchain/trace_langchain_RAG_evals.py diff --git a/parea/cookbook/langchain/trace_langchain_RAG_with_experiment.py b/cookbook/langchain/trace_langchain_RAG_with_experiment.py similarity index 100% rename from parea/cookbook/langchain/trace_langchain_RAG_with_experiment.py rename to cookbook/langchain/trace_langchain_RAG_with_experiment.py diff --git a/parea/cookbook/langchain/trace_langchain_anthropic_function_calling.py b/cookbook/langchain/trace_langchain_anthropic_function_calling.py similarity index 100% rename from parea/cookbook/langchain/trace_langchain_anthropic_function_calling.py rename to cookbook/langchain/trace_langchain_anthropic_function_calling.py diff --git a/parea/cookbook/langchain/trace_langchain_azure_RAG_with_experiment.py 
b/cookbook/langchain/trace_langchain_azure_RAG_with_experiment.py similarity index 100% rename from parea/cookbook/langchain/trace_langchain_azure_RAG_with_experiment.py rename to cookbook/langchain/trace_langchain_azure_RAG_with_experiment.py diff --git a/parea/cookbook/langchain/trace_langchain_bedrock_rag.py b/cookbook/langchain/trace_langchain_bedrock_rag.py similarity index 100% rename from parea/cookbook/langchain/trace_langchain_bedrock_rag.py rename to cookbook/langchain/trace_langchain_bedrock_rag.py diff --git a/parea/cookbook/langchain/trace_langchain_inside_trace_decorator.py b/cookbook/langchain/trace_langchain_inside_trace_decorator.py similarity index 100% rename from parea/cookbook/langchain/trace_langchain_inside_trace_decorator.py rename to cookbook/langchain/trace_langchain_inside_trace_decorator.py diff --git a/parea/cookbook/langchain/trace_langchain_rag_agents.py b/cookbook/langchain/trace_langchain_rag_agents.py similarity index 100% rename from parea/cookbook/langchain/trace_langchain_rag_agents.py rename to cookbook/langchain/trace_langchain_rag_agents.py diff --git a/parea/cookbook/langchain/trace_langchain_rag_question_answering.py b/cookbook/langchain/trace_langchain_rag_question_answering.py similarity index 100% rename from parea/cookbook/langchain/trace_langchain_rag_question_answering.py rename to cookbook/langchain/trace_langchain_rag_question_answering.py diff --git a/parea/cookbook/langchain/trace_langchain_simple.py b/cookbook/langchain/trace_langchain_simple.py similarity index 100% rename from parea/cookbook/langchain/trace_langchain_simple.py rename to cookbook/langchain/trace_langchain_simple.py diff --git a/parea/cookbook/langchain/trace_langchain_with_deployed_prompt.py b/cookbook/langchain/trace_langchain_with_deployed_prompt.py similarity index 100% rename from parea/cookbook/langchain/trace_langchain_with_deployed_prompt.py rename to cookbook/langchain/trace_langchain_with_deployed_prompt.py diff --git a/parea/cookbook/marvin/trace_marvin.py b/cookbook/marvin/trace_marvin.py similarity index 100% rename from parea/cookbook/marvin/trace_marvin.py rename to cookbook/marvin/trace_marvin.py diff --git a/parea/cookbook/openai/dynamic_few_shot_injection_with_evals.py b/cookbook/openai/dynamic_few_shot_injection_with_evals.py similarity index 100% rename from parea/cookbook/openai/dynamic_few_shot_injection_with_evals.py rename to cookbook/openai/dynamic_few_shot_injection_with_evals.py diff --git a/parea/cookbook/openai/simple_experiment_with_openai.py b/cookbook/openai/simple_experiment_with_openai.py similarity index 100% rename from parea/cookbook/openai/simple_experiment_with_openai.py rename to cookbook/openai/simple_experiment_with_openai.py diff --git a/parea/cookbook/openai/trace_class_call_method.py b/cookbook/openai/trace_class_call_method.py similarity index 100% rename from parea/cookbook/openai/trace_class_call_method.py rename to cookbook/openai/trace_class_call_method.py diff --git a/parea/cookbook/openai/tracing_and_evaluating_openai_endpoint.py b/cookbook/openai/tracing_and_evaluating_openai_endpoint.py similarity index 100% rename from parea/cookbook/openai/tracing_and_evaluating_openai_endpoint.py rename to cookbook/openai/tracing_and_evaluating_openai_endpoint.py diff --git a/parea/cookbook/openai/tracing_azure_open_ai.py b/cookbook/openai/tracing_azure_open_ai.py similarity index 94% rename from parea/cookbook/openai/tracing_azure_open_ai.py rename to cookbook/openai/tracing_azure_open_ai.py index 22045c0d..4be3a007 100644 --- 
a/parea/cookbook/openai/tracing_azure_open_ai.py +++ b/cookbook/openai/tracing_azure_open_ai.py @@ -5,7 +5,7 @@ from openai.lib.azure import AsyncAzureOpenAI, AzureOpenAI from parea import Parea, trace -from parea.cookbook.assets.data.openai_input_examples import functions_example, simple_example +from cookbook.assets.data.openai_input_examples import functions_example, simple_example load_dotenv() diff --git a/parea/cookbook/openai/tracing_open_ai_streams.py b/cookbook/openai/tracing_open_ai_streams.py similarity index 91% rename from parea/cookbook/openai/tracing_open_ai_streams.py rename to cookbook/openai/tracing_open_ai_streams.py index e02f59ff..4823f1b7 100644 --- a/parea/cookbook/openai/tracing_open_ai_streams.py +++ b/cookbook/openai/tracing_open_ai_streams.py @@ -5,7 +5,7 @@ from openai import AsyncOpenAI, OpenAI from parea import Parea, trace -from parea.cookbook.assets.data.openai_input_examples import functions_example, simple_example_json +from cookbook.assets.data.openai_input_examples import functions_example, simple_example_json load_dotenv() diff --git a/parea/cookbook/openai/tracing_openai_assistant_endpoint.py b/cookbook/openai/tracing_openai_assistant_endpoint.py similarity index 100% rename from parea/cookbook/openai/tracing_openai_assistant_endpoint.py rename to cookbook/openai/tracing_openai_assistant_endpoint.py diff --git a/parea/cookbook/openai/tracing_templated_llm_calls.py b/cookbook/openai/tracing_templated_llm_calls.py similarity index 100% rename from parea/cookbook/openai/tracing_templated_llm_calls.py rename to cookbook/openai/tracing_templated_llm_calls.py diff --git a/parea/cookbook/openai/tracing_tool_calling.py b/cookbook/openai/tracing_tool_calling.py similarity index 100% rename from parea/cookbook/openai/tracing_tool_calling.py rename to cookbook/openai/tracing_tool_calling.py diff --git a/parea/cookbook/openai/tracing_with_images_open_ai.py b/cookbook/openai/tracing_with_images_open_ai.py similarity index 100% rename from parea/cookbook/openai/tracing_with_images_open_ai.py rename to cookbook/openai/tracing_with_images_open_ai.py diff --git a/parea/cookbook/openai/tracing_with_open_ai_endpoint_directly.py b/cookbook/openai/tracing_with_open_ai_endpoint_directly.py similarity index 100% rename from parea/cookbook/openai/tracing_with_open_ai_endpoint_directly.py rename to cookbook/openai/tracing_with_open_ai_endpoint_directly.py diff --git a/parea/cookbook/openai/tracing_with_openai_requests_api.py b/cookbook/openai/tracing_with_openai_requests_api.py similarity index 95% rename from parea/cookbook/openai/tracing_with_openai_requests_api.py rename to cookbook/openai/tracing_with_openai_requests_api.py index ce2863ea..55eb92c5 100644 --- a/parea/cookbook/openai/tracing_with_openai_requests_api.py +++ b/cookbook/openai/tracing_with_openai_requests_api.py @@ -4,7 +4,7 @@ from dotenv import load_dotenv from parea import Parea, aprocess_stream_and_yield, convert_openai_raw_to_log, process_stream_and_yield, trace -from parea.cookbook.assets.data.openai_input_examples import functions_example, simple_example, tool_calling_example +from cookbook.assets.data.openai_input_examples import functions_example, simple_example, tool_calling_example from parea.wrapper import get_formatted_openai_response load_dotenv() diff --git a/parea/cookbook/openai/tracing_with_openai_with_functions.py b/cookbook/openai/tracing_with_openai_with_functions.py similarity index 100% rename from parea/cookbook/openai/tracing_with_openai_with_functions.py rename to 
cookbook/openai/tracing_with_openai_with_functions.py diff --git a/parea/cookbook/parea_llm_proxy/deployments/fetching_and_using_parea_deployments.py b/cookbook/parea_llm_proxy/deployments/fetching_and_using_parea_deployments.py similarity index 100% rename from parea/cookbook/parea_llm_proxy/deployments/fetching_and_using_parea_deployments.py rename to cookbook/parea_llm_proxy/deployments/fetching_and_using_parea_deployments.py diff --git a/parea/cookbook/parea_llm_proxy/deployments/tracing_with_deployed_prompt.py b/cookbook/parea_llm_proxy/deployments/tracing_with_deployed_prompt.py similarity index 100% rename from parea/cookbook/parea_llm_proxy/deployments/tracing_with_deployed_prompt.py rename to cookbook/parea_llm_proxy/deployments/tracing_with_deployed_prompt.py diff --git a/parea/cookbook/parea_llm_proxy/dynamic_few_shot_injection.py b/cookbook/parea_llm_proxy/dynamic_few_shot_injection.py similarity index 100% rename from parea/cookbook/parea_llm_proxy/dynamic_few_shot_injection.py rename to cookbook/parea_llm_proxy/dynamic_few_shot_injection.py diff --git a/parea/cookbook/parea_llm_proxy/tracing_with_Parea_sdk.ipynb b/cookbook/parea_llm_proxy/tracing_with_Parea_sdk.ipynb similarity index 100% rename from parea/cookbook/parea_llm_proxy/tracing_with_Parea_sdk.ipynb rename to cookbook/parea_llm_proxy/tracing_with_Parea_sdk.ipynb diff --git a/parea/cookbook/parea_llm_proxy/tracing_with_agent.py b/cookbook/parea_llm_proxy/tracing_with_agent.py similarity index 100% rename from parea/cookbook/parea_llm_proxy/tracing_with_agent.py rename to cookbook/parea_llm_proxy/tracing_with_agent.py diff --git a/parea/cookbook/parea_llm_proxy/tracing_with_function_calling_and_chains.ipynb b/cookbook/parea_llm_proxy/tracing_with_function_calling_and_chains.ipynb similarity index 100% rename from parea/cookbook/parea_llm_proxy/tracing_with_function_calling_and_chains.ipynb rename to cookbook/parea_llm_proxy/tracing_with_function_calling_and_chains.ipynb diff --git a/parea/cookbook/parea_llm_proxy/tracing_with_parea_streaming.py b/cookbook/parea_llm_proxy/tracing_with_parea_streaming.py similarity index 100% rename from parea/cookbook/parea_llm_proxy/tracing_with_parea_streaming.py rename to cookbook/parea_llm_proxy/tracing_with_parea_streaming.py diff --git a/parea/cookbook/parea_llm_proxy/tracing_without_deployed_prompt.py b/cookbook/parea_llm_proxy/tracing_without_deployed_prompt.py similarity index 100% rename from parea/cookbook/parea_llm_proxy/tracing_without_deployed_prompt.py rename to cookbook/parea_llm_proxy/tracing_without_deployed_prompt.py diff --git a/parea/cookbook/tracing_with_threading.py b/cookbook/tracing_with_threading.py similarity index 100% rename from parea/cookbook/tracing_with_threading.py rename to cookbook/tracing_with_threading.py diff --git a/parea/cookbook/use_dataset_for_finetuning.py b/cookbook/use_dataset_for_finetuning.py similarity index 100% rename from parea/cookbook/use_dataset_for_finetuning.py rename to cookbook/use_dataset_for_finetuning.py diff --git a/docker/Dockerfile b/docker/Dockerfile deleted file mode 100644 index 5cdc06f4..00000000 --- a/docker/Dockerfile +++ /dev/null @@ -1,25 +0,0 @@ -FROM python:3.9-slim-buster - -ENV LANG=C.UTF-8 \ - LC_ALL=C.UTF-8 \ - PATH="${PATH}:/root/.poetry/bin" - -RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - curl \ - && rm -rf /var/lib/apt/lists/* - -COPY pyproject.toml ./ - -# Install Poetry -RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/install-poetry.py | 
POETRY_HOME=/opt/poetry python && \ - cd /usr/local/bin && \ - ln -s /opt/poetry/bin/poetry && \ - poetry config virtualenvs.create false - -# Allow installing dev dependencies to run tests -ARG INSTALL_DEV=false -RUN bash -c "if [ $INSTALL_DEV == 'true' ] ; then poetry install --no-root ; else poetry install --no-root --no-dev ; fi" - -CMD mkdir -p /workspace -WORKDIR /workspace diff --git a/docker/README.md b/docker/README.md deleted file mode 100644 index 477a9c53..00000000 --- a/docker/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# Docker for parea-sdk - -## Installation - -To create Docker you need to run: - -```bash -make docker-build -``` - -which is equivalent to: - -```bash -make docker-build VERSION=latest -``` - -You may provide name and version for the image. -Default name is `IMAGE := parea`. -Default version is `VERSION := latest`. - -```bash -make docker-build IMAGE=some_name VERSION=0.1.0 -``` - -## Usage - -```bash -docker run -it --rm \ - -v $(pwd):/workspace \ - parea bash -``` - -## How to clean up - -To uninstall docker image run `make docker-remove` with `VERSION`: - -```bash -make docker-remove VERSION=0.1.0 -``` - -you may also choose the image name - -```bash -make docker-remove IMAGE=some_name VERSION=latest -``` - -If you want to clean all, including `build` and `pycache` run `make cleanup` diff --git a/parea/cookbook/assets/data/__init__.py b/parea/cookbook/assets/data/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/parea/cookbook/parea_llm_proxy/__init__.py b/parea/cookbook/parea_llm_proxy/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/parea/cookbook/parea_llm_proxy/deployments/__init__.py b/parea/cookbook/parea_llm_proxy/deployments/__init__.py deleted file mode 100644 index e69de29b..00000000 From e8319445000feaa59ba94b06ecbdb80987468ecb Mon Sep 17 00:00:00 2001 From: Joschka Braun Date: Mon, 1 Jul 2024 14:52:40 -0400 Subject: [PATCH 3/4] docs: update readme --- README.md | 189 +++++++++--------- assets/PareaLogoLight.png | Bin 0 -> 18626 bytes .../anthropic/tracing_anthropic_tool_use.py | 2 +- cookbook/openai/tracing_azure_open_ai.py | 2 +- cookbook/openai/tracing_open_ai_streams.py | 2 +- .../tracing_with_openai_requests_api.py | 2 +- parea/wrapper/anthropic/anthropic.py | 3 +- 7 files changed, 104 insertions(+), 96 deletions(-) create mode 100644 assets/PareaLogoLight.png diff --git a/README.md b/README.md index 6a5566e4..b75c817f 100644 --- a/README.md +++ b/README.md @@ -1,20 +1,35 @@ -# Evaluate Your AI Application with Parea's Python SDK +

+Test, evaluate & monitor your AI application +

-
+

+Test, evaluate & monitor your AI application +

-[![Build status](https://github.com/parea-ai/parea-sdk/workflows/build/badge.svg?branch=master&event=push)](https://github.com/parea-ai/parea-sdk/actions?query=workflow%3Abuild) -[![Dependencies Status](https://img.shields.io/badge/dependencies-up%20to%20date-brightgreen.svg)](https://github.com/parea-ai/parea-sdk/pulls?utf8=%E2%9C%93&q=is%3Apr%20author%3Aapp%2Fdependabot) -[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) +

+PyPI +PyPI - Downloads from official pypistats +License +

-[![Pre-commit](https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white)](https://github.com/parea-ai/parea-sdk/blob/master/.pre-commit-config.yaml) -[![Semantic Versions](https://img.shields.io/badge/%20%20%F0%9F%93%A6%F0%9F%9A%80-semantic--versions-e10079.svg)](https://github.com/parea-ai/parea-sdk/releases) -[![License](https://img.shields.io/github/license/parea-ai/parea-sdk)](https://github.com/parea-ai/parea-sdk/blob/main/LICENSE) +

+🐦 Twitter/X +  •   +📢 Discord +  •   +Parea AI +  •   +📙 Documentation +

-
-[Parea AI](https://www.parea.ai) provides an SDK to evaluate & monitor your AI applications. +[Parea AI](https://www.parea.ai) provides an SDK to evaluate & monitor your AI applications. Below you can see quickstarts to: -[Python SDK Docs](https://docs.parea.ai/api-reference/sdk/python) +- [evaluate & test](#evaluating-your-llm-app) your LLM App +- [instrument logging & observability](#logging--observability) for your LLM App +- [deploy prompts](#deploying-prompts) to enable collaboration between engineers & subject-matter experts + +Our full docs are [here](https://docs.parea.ai/). ## Installation @@ -61,107 +76,113 @@ p.experiment( In the snippet above, we used the `trace` decorator to capture any inputs & outputs of the function. This decorator also enables scoring the output by executing the `levenshtein` eval in the background. Then, we defined an experiment via `p.experiment` to evaluate our function (`greeting`) over a dataset (here a list of dictionaries). -Calling `run` will execute the experiment, and create a report of outputs, scores & traces for each sample of the dataset. +Finally, calling `run` will execute the experiment, and create a report of outputs, scores & traces for each sample of the dataset. You can find a link to the executed experiment [here](). (todo: fill-in experiment) ### More Resources -Read more about how to run & analyze experiments. - -### Running Evals - - -### Writing Evals +Read more about how to write, run & analyze experiments in our [docs](https://docs.parea.ai/evaluation/overview). -## Evaluating Your LLM App - -You can evaluate any step of your LLM app by wrapping it with a decorator, called `trace`, and specifying the evaluation -function(s). -The scores associated with the traces will be logged to the Parea [dashboard](https://app.parea.ai/logs) and/or in a -local CSV file if you don't have a Parea API key. +## Logging & Observability -Evaluation functions receive an argument `log` (of type [Log](parea/schemas/models.py)) and should return a -float. You don't need to start from scratch, there are pre-defined evaluation -functions for [general purpose](parea/evals/general), -[chat](parea/evals/chat), [RAG](parea/evals/rag), and [summarization](parea/evals/summary) apps :) +By wrapping the respective clients, you can automatically log all your LLM calls to OpenAI & Anthropic. +Additionally, using the `trace` decorator, you can create hierarchical traces of your LLM application, e.g. to associate LLM calls with the retrieval step of a RAG pipeline. +You can see the full observability documentation [here](https://docs.parea.ai/observability/overview) and our integrations with LangChain, Instructor, DSPy, LiteLLM & more [here](https://docs.parea.ai/integrations/langchain). -You can define evaluation functions locally or use the ones you have deployed to -Parea's [Test Hub](https://app.parea.ai/test-hub). -If you choose the latter option, the evaluation happens asynchronously and non-blocking. +### Automatically log all your OpenAI calls -A fully locally working cookbook can be found [here](cookbook/openai/tracing_and_evaluating_openai_endpoint.py). -Alternatively, you can add the following code to your codebase to get started: +To automatically log any OpenAI call, you can wrap the OpenAI client with the Parea client using the `wrap_openai_client` method. 
```python -import os -from parea import Parea, InMemoryCache, trace -from parea.schemas.log import Log - -Parea(api_key=os.getenv("PAREA_API_KEY"), cache=InMemoryCache()) # use InMemoryCache if you don't have a Parea API key - +from openai import OpenAI +from parea import Parea -def locally_defined_eval_function(log: Log) -> float: - ... +client = OpenAI(api_key="OPENAI_API_KEY") +# All you need to do is add these two lines +p = Parea(api_key="PAREA_API_KEY") # replace with your API key +p.wrap_openai_client(client) -@trace(eval_func_names=['deployed_eval_function_name'], eval_funcs=[locally_defined_eval_function]) -def function_to_evaluate(*args, **kwargs) -> ...: - ... +response = client.chat.completions.create( + model="gpt-4o", + messages=[ + { + "role": "user", + "content": "Write a Hello World program in Python using FastAPI.", + } + ], +) +print(response.choices[0].message.content) ``` +### Automatically log all your Anthropic calls - -## Logging & Observability - -### Automatically log all your LLM call traces - -You can automatically log all your LLM traces to the Parea dashboard by setting the `PAREA_API_KEY` environment variable -or specifying it in the `Parea` initialization. -This will help you debug issues your customers are facing by stepping through the LLM call traces and recreating the -issue -in your local setup & code. +To automatically log any Anthropic call, you can wrap the Anthropic client with the Parea client using the `wrap_anthropic_client` method. ```python +import anthropic from parea import Parea -Parea( - api_key=os.getenv("PAREA_API_KEY"), # default value - cache=... +p = Parea(api_key="PAREA_API_KEY") # replace with your API key + +client = anthropic.Anthropic() +p.wrap_anthropic_client(client) + +message = client.messages.create( + model="claude-3-opus-20240229", + max_tokens=1024, + messages=[ + { + "role": "user", + "content": "Write a Hello World program in Python using FastAPI.", + } + ], ) +print(message.content[0].text) ``` -### Logging results from LLM providers [Example] +### Nested traces + +By using the `trace` decorator, you can create hierarchical traces of your LLM application. ```python -import os +from openai import OpenAI +from parea import Parea, trace -import openai -from dotenv import load_dotenv +client = OpenAI(api_key="OPENAI_API_KEY") # replace with your API key -from parea import Parea +p = Parea(api_key="PAREA_API_KEY") # replace with your API key +p.wrap_openai_client(client) -load_dotenv() -openai.api_key = os.getenv("OPENAI_API_KEY") +# We generally recommend creating a helper function to make LLM API calls. +def llm(messages: list[dict[str, str]]) -> str: + response = client.chat.completions.create(model="gpt-4o", messages=messages) + return response.choices[0].message.content -p = Parea(api_key=os.getenv("PAREA_API_KEY")) -x = "Golang" -y = "Fiber" -messages = [{ - "role": "user", - "content": f"Write a hello world program using {x} and the {y} framework." -}] -model = "gpt-3.5-turbo" -temperature = 0.0 +# This will give the Span the name of the function. 
+# Without the decorator the default name for all LLM call logs is `llm-openai` +@trace +def hello_world(lang: str, framework: str): + return llm([{"role": "user", "content": f"Write a Hello World program in {lang} using {framework}."}]) +@trace +def critique_code(code: str): + return llm([{"role": "user", "content": f"How can we improve this code: \n {code}"}]) -# define your OpenAI call as you would normally and we'll automatically log the results -def main(): - openai.chat.completions.create(model=model, temperature=temperature, messages=messages).choices[0].message.content +# Our top level function is called chain. By adding the trace decorator here, +# all sub-functions will automatically be logged and associated with this trace. +# Notice, you can also add metadata to the trace, we'll revisit this functionality later. +@trace(metadata={"purpose": "example"}, end_user_identifier="John Doe") +def chain(lang: str, framework: str) -> str: + return critique_code(hello_world(lang, framework)) + + +print(chain("Python", "FastAPI")) ``` ## Deploying Prompts @@ -169,18 +190,14 @@ def main(): Deployed prompts enable collaboration with non-engineers such as product managers & subject-matter experts. Users can iterate, refine & test prompts on Parea's playground. After tinkering, you can deploy that prompt which means that it is exposed via an API endpoint to integrate it into your application. +Checkout our full docs [here](https://docs.parea.ai/platform/deployment). ```python -import os - -from dotenv import load_dotenv - from parea import Parea from parea.schemas.models import Completion, UseDeployedPrompt, CompletionResponse, UseDeployedPromptResponse -load_dotenv() -p = Parea(api_key=os.getenv("PAREA_API_KEY")) +p = Parea(api_key="") # You will find this deployment_id in the Parea dashboard deployment_id = '' @@ -210,14 +227,6 @@ def main(): deployed_prompt: UseDeployedPromptResponse = p.get_prompt(data=test_get_prompt) print("\n\n") print(deployed_prompt) - - -async def main_async(): - completion_response: CompletionResponse = await p.acompletion(data=test_completion) - print(completion_response) - deployed_prompt: UseDeployedPromptResponse = await p.aget_prompt(data=test_get_prompt) - print("\n\n") - print(deployed_prompt) ``` diff --git a/assets/PareaLogoLight.png b/assets/PareaLogoLight.png new file mode 100644 index 0000000000000000000000000000000000000000..04f2c7de31a1599f3d9bb56972392b4ba9478087 GIT binary patch literal 18626 zcmeFYWmKErwl`XDaZ0h`6`;7g6?b=chhTwVfws6i6feP{xRpSl(Bke6#R=|#1jwcT zz2AM#dGGyj$Gu+S9IL>*Y_%jL#UvF z(mnXXMB)ATK}K(Fq56ZoR^P9T*f_85j_i9eTX$mA_uL0l33 zr`p5Ywz10R?xZa<^V7yk&BY#cc&M;tFK!!haGUqle8_lW~}o^zPbu@0v&{nR#gC z_1=oB(939Uq~hJwJl#rpf`&3vh$`JR;xo$Wp!>AY13I^j80an;6;o66loxu?1l_B8 z*T!~`i-9P|LiZ{#@^aJ4X=bK5qbHn32a3zeqZkFnqznw>(n9%EwZKt<=$7~A1sEFU zrVP?*KN^!z)p+g!?srXZqZ546!o`IpxHcB6?ndl>|IYIdcGob}U{=(rs!5ND)Vi4s zosWOMUCQ1GOU}#)&;Jy!ye89@5Il3iuQWvUX624G>Y#l)5MZeqVH1M8&uPt=NjPOdtgzJ?WgM-A)~qW&`B{tC^%n)3&4cjbRDY zP|9-i&HI zj>Kj}9$vjVy+K@)eR?a2JSWxd;aLR6-j-ZjE2b~p-uCYxuArv}G;_fHzo#FboGxwCBxF^ug#a&dv#oCER z+{xP89AIVbF8o*Ue@)%j`#%f*{}kE#hWt-x|Hn$a&0YUn?ca+D{!_Cq?piJ`PNI_L z055Z4w*Q^||G1QYt?s_1@0*+LKU=^(@t;k^8gRdJxZiIP-iG+#2M=f-$V-W9c|SbJ z#_%T8o<@+@jyhrZy!aeOBk?F$`r$*OuWsh951Vv>-EPDyZu><^blKciUEjG!<8d+H zeEal8oCg0=q)l2I|H{hbJx2;JVx_N2}?BWR`4n|n!2+$TeRvESjW_yen z@22+se)Y=ik&pNM{E;Lr+r$96%ygx?^vla z06Dl#_3w}GudS_JT`_3ZDh{aV+^kh}puCn&2Y)@@_0w2B@})DQ8b!TS$o2VIZ5x0t z2eE>)2F4~zgU_3l;z}l6?VA1ds%)_!+|0zxWyGFPdXU}sE&`< zZB;gtZFIBUe=o=q6amL4KM 
z49i}xeqj_5QwWFR;~q5?8T;ymeL%~U>mO@-&xs#OiD%?5*A`|~;V6&ys|EdjOy`-2 zbN#SD?|^%L<&<1S5_Y@`L)6LtRzvw9gV~IqwHmDnHW~37-l^yJqyU3zt3YnYklZ?mRZwzx85-9F zikX&dk~Cjit`(X#L6@Gu^^RXh%YnaNH!4&(ofIwM1$ zUt~m#^DG^(y+(~tk1e}8{gLIm6`J8CpXnBqd_T;nkYpvEX8x{_C&SMXTusM&>h*%vU!i1jZ@7Sk`j&)9DKs`G< zfV^klxmTLma{Qa`8d%4$gFd!-_f>|_?|42Jer9lf7z3BKC3|_cu$ak~YAC!cObKZ* z%1F0`a-KNsxfC8&z13gktLO6nYNWC`q|VZEN3r?X@xw-);TnWi6O+q^omdg?;34Ha z#PRzfZ_4`1$1)d+kv|@7E|kT6UHx89`bzP$-e_r<(fAADdf5KEmM2blw`EnGl##}FtV=<#-Vn0?QD5o0-@}Ip>xL2Yu#woF~n<0v6rYG2b)zC|eYRd(y(eY&Tc>4oj7Y zNE2Zvl}kK)`SY2ko@m5FJg&SplJ91+JP$tSdsJk#|4eEzt~(#|BP%Ti(IB8a!U z$-Piey6V8gp+8-MH5dc{u$TSl{cRR%6&rr;7MB#tS~4{5_6eoHY^>;9_gl{M2Pq*_ zJ~G7rOs}o0h!QesJL#@@##**_dbr#@Gr!fwAUF#~srvPBnZ|@S%}*R`%!M?!*k&{Q zMk#(3hU2Qboval`#X2o|Tdfu;XR1(jEEXvSA(Dyx)Y2sr0+CRbJ@jYuZW#>$oVKOr z%?OpdIyCjj{qf82*l1{tUwJnvElg4hjbCKX%eP0cBwXv_WD!+_)@9%hYp^GYdea4M zlHh<@TSVP8zzoXi%4DOaKTNL7-oj(mm*#1oQs;%mvdg~jzKn6ATH<-zdYJ@5F1-SY z-fSvp{f+<-ReQ*eb(48}JD<)@hZyYaNF_Hv&-dO&$D0=1wO*;z56(-L<^e@A# zA=_Ux@VCM>9AMLN7Qc z@Zp(icL-InY~52p1;Qyk=MVKk6xI#VqLMky<&6_(je99Sf3CI*k*&Y9eB3z-CkI$` zl2G1QOl0XrIru1_vtSG(h%Zu?Ken5=#7z)oeJoyWik@8;R|?zpKH~qs*1W{vXW^D@ z2|{81s|AiHE-qdin9l`^c{^3X-Qj*nfC#kyI(uv)}V8HVAVS^z&ItwOf^oJW|*bXK^?n>7Jd* zlWT@EGX5<1_B%gu|GLA!xBuPyyzXpZxm4MI-PdB%qfMUHik7@EY8R`xa>c^tWMT9F zn)l6fCjYgew5AHwl#r7k|7p1Snt;x|#n+)^VnPNB0hVmss-o z!<+q@jGI>ix4^!CQ~N)DpVg-9t4qCqe~Xpc^vZub@9yXG+;8!{`I2AdKj%%A0Ec^X z@6O8y8p|_;w;W@?^6#va_N&6#d%7n!y!-Su{&87c+UQp6(H-U_>@bflxNrk_?1&fW gTmZ_Gjw8n(dlL)Y1by*rb)cyXp00i_>zopr0Q`LifdBvi literal 0 HcmV?d00001 diff --git a/cookbook/anthropic/tracing_anthropic_tool_use.py b/cookbook/anthropic/tracing_anthropic_tool_use.py index e90a5ff4..2087f53e 100644 --- a/cookbook/anthropic/tracing_anthropic_tool_use.py +++ b/cookbook/anthropic/tracing_anthropic_tool_use.py @@ -3,8 +3,8 @@ import anthropic from dotenv import load_dotenv -from parea import Parea from cookbook.assets.data.anthropic_tool_use_examples import missing_information, multiple_tool_use, single_tool_use +from parea import Parea load_dotenv() diff --git a/cookbook/openai/tracing_azure_open_ai.py b/cookbook/openai/tracing_azure_open_ai.py index 4be3a007..b3cf23b3 100644 --- a/cookbook/openai/tracing_azure_open_ai.py +++ b/cookbook/openai/tracing_azure_open_ai.py @@ -4,8 +4,8 @@ from dotenv import load_dotenv from openai.lib.azure import AsyncAzureOpenAI, AzureOpenAI -from parea import Parea, trace from cookbook.assets.data.openai_input_examples import functions_example, simple_example +from parea import Parea, trace load_dotenv() diff --git a/cookbook/openai/tracing_open_ai_streams.py b/cookbook/openai/tracing_open_ai_streams.py index 4823f1b7..ee823af2 100644 --- a/cookbook/openai/tracing_open_ai_streams.py +++ b/cookbook/openai/tracing_open_ai_streams.py @@ -4,8 +4,8 @@ from dotenv import load_dotenv from openai import AsyncOpenAI, OpenAI -from parea import Parea, trace from cookbook.assets.data.openai_input_examples import functions_example, simple_example_json +from parea import Parea, trace load_dotenv() diff --git a/cookbook/openai/tracing_with_openai_requests_api.py b/cookbook/openai/tracing_with_openai_requests_api.py index 55eb92c5..d1d47f35 100644 --- a/cookbook/openai/tracing_with_openai_requests_api.py +++ b/cookbook/openai/tracing_with_openai_requests_api.py @@ -3,8 +3,8 @@ import httpx from dotenv import load_dotenv -from parea import Parea, aprocess_stream_and_yield, convert_openai_raw_to_log, process_stream_and_yield, trace from cookbook.assets.data.openai_input_examples import functions_example, 
simple_example, tool_calling_example +from parea import Parea, aprocess_stream_and_yield, convert_openai_raw_to_log, process_stream_and_yield, trace from parea.wrapper import get_formatted_openai_response load_dotenv() diff --git a/parea/wrapper/anthropic/anthropic.py b/parea/wrapper/anthropic/anthropic.py index c4e6dda0..e843cdad 100644 --- a/parea/wrapper/anthropic/anthropic.py +++ b/parea/wrapper/anthropic/anthropic.py @@ -9,8 +9,7 @@ from parea.cache.cache import Cache from parea.helpers import timezone_aware_now -from parea.schemas import CacheRequest, LLMInputs -from parea.schemas import ModelParams +from parea.schemas import CacheRequest, LLMInputs, ModelParams from parea.schemas import Role as PareaRole from parea.schemas import TraceLog from parea.utils.trace_utils import make_output, trace_data From d3ef3d4275bb43c5fc0efc6ef4eaaf56f4017647 Mon Sep 17 00:00:00 2001 From: Joschka Braun Date: Mon, 1 Jul 2024 14:52:59 -0400 Subject: [PATCH 4/4] chore: bump version --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 4708f0f0..c9132347 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "parea-ai" packages = [{ include = "parea" }] -version = "0.2.181" +version = "0.2.182" description = "Parea python sdk" readme = "README.md" authors = ["joel-parea-ai "]