Skip to content

Commit e5d51e5

Browse files
committed
examples: schedule-extract.py update
1 parent 7358c50 commit e5d51e5

31 files changed

+3772
-4134
lines changed

examples/Langroid_quick_start.ipynb

Lines changed: 829 additions & 3773 deletions
Large diffs are not rendered by default.

examples/basic/autocorrect.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ def chat() -> None:
4343

4444
config = ChatAgentConfig(
4545
llm=OpenAIGPTConfig(
46-
chat_model=OpenAIChatModel.GPT4,
46+
chat_model=OpenAIChatModel.GPT4o,
4747
),
4848
vecdb=None,
4949
)
@@ -84,16 +84,13 @@ def main(
8484
debug: bool = typer.Option(False, "--debug", "-d", help="debug mode"),
8585
no_stream: bool = typer.Option(False, "--nostream", "-ns", help="no streaming"),
8686
nocache: bool = typer.Option(False, "--nocache", "-nc", help="don't use cache"),
87-
cache_type: str = typer.Option(
88-
"redis", "--cachetype", "-ct", help="redis or momento"
89-
),
9087
) -> None:
9188
set_global(
9289
Settings(
9390
debug=debug,
9491
cache=not nocache,
9592
stream=not no_stream,
96-
cache_type=cache_type,
93+
cache_type="redis",
9794
)
9895
)
9996
chat()

examples/basic/chat-azure-client.py

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,17 @@
22
Example showing how to use Langroid with Azure OpenAI and Entra ID
33
authentication by providing a custom client.
44
5+
NOTE: this example is ONLY meant for those who are trying to use a custom
6+
Azure client, as in this scenario:
7+
https://langroid.github.io/langroid/notes/custom-azure-client/
8+
This is NOT TYPICAL for most users, and should be ignored if you are not using such a
9+
custom client.
10+
11+
For typical usage of Azure-deployed models with Langroid, see
12+
the [`test_azure_openai.py`](https://github.com/langroid/langroid/blob/main/tests/main/test_azure_openai.py) and
13+
[`examples/basic/chat.py`](https://github.com/langroid/langroid/blob/main/examples/basic/chat.py)
14+
15+
516
For an async version of this example, see chat-azure-async-client.py.
617
718
For more details see here:

examples/basic/chat-search.py

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
answer questions:
44
- GoogleSearchTool
55
- DuckduckgoSearchTool
6-
- MetaphorSearchTool
6+
- ExaSearchTool
77
When the LLM doesn't know the answer to a question, it will use the tool to
88
search the web for relevant results, and then use the results to answer the
99
question.
@@ -18,7 +18,7 @@
1818
1919
There are optional args, especially note these:
2020
21-
-p or --provider: google or ddg or metaphor (default: google)
21+
-p or --provider: google or ddg or exa (default: ddg)
2222
-m <model_name>: to run with a different LLM model (default: gpt4-turbo)
2323
2424
You can specify a local model in a few different ways, e.g. `-m local/localhost:8000/v1`
@@ -32,15 +32,15 @@
3232
[README](https://github.com/langroid/langroid#gear-installation-and-setup).
3333
3434
35-
(b) If using MetaphorSearchTool, you need to:
36-
* set the METAPHOR_API_KEY environment variables in
37-
your `.env` file, e.g. `METAPHOR_API_KEY=your_api_key_here`
38-
* install langroid with the `metaphor` extra, e.g.
39-
`pip install langroid[metaphor]` or `uv pip install langroid[metaphor]`
40-
or `poetry add langroid[metaphor]` or `uv add langroid[metaphor]`
41-
(it installs the `metaphor-python` package from pypi).
35+
(b) If using ExaSearchTool, you need to:
36+
* set the EXA_API_KEY environment variable in
37+
your `.env` file, e.g. `EXA_API_KEY=your_api_key_here`
38+
* install langroid with the `exa` extra, e.g.
39+
`pip install langroid[exa]` or `uv pip install langroid[exa]`
40+
or `poetry add langroid[exa]` or `uv add langroid[exa]`
41+
(it installs the `exa-py` package from pypi).
4242
For more information, please refer to the official docs:
43-
https://metaphor.systems/
43+
https://exa.ai/
4444
4545
"""
4646

@@ -62,10 +62,10 @@ def main(
6262
debug: bool = typer.Option(False, "--debug", "-d", help="debug mode"),
6363
model: str = typer.Option("", "--model", "-m", help="model name"),
6464
provider: str = typer.Option(
65-
"google",
65+
"ddg",
6666
"--provider",
6767
"-p",
68-
help="search provider name (google, ddg, metaphor)",
68+
help="search provider name (google, ddg, exa)",
6969
),
7070
no_stream: bool = typer.Option(False, "--nostream", "-ns", help="no streaming"),
7171
nocache: bool = typer.Option(False, "--nocache", "-nc", help="don't use cache"),
@@ -100,10 +100,10 @@ def main(
100100
match provider:
101101
case "google":
102102
search_tool_class = GoogleSearchTool
103-
case "metaphor":
104-
from langroid.agent.tools.metaphor_search_tool import MetaphorSearchTool
103+
case "exa":
104+
from langroid.agent.tools.exa_search_tool import ExaSearchTool
105105

106-
search_tool_class = MetaphorSearchTool
106+
search_tool_class = ExaSearchTool
107107
case "ddg":
108108
search_tool_class = DuckduckgoSearchTool
109109
case _:

examples/basic/chat.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,7 @@ def main(
7171
load_dotenv()
7272

7373
# use the appropriate config instance depending on model name
74+
# NOTE: when using Azure, change this to `lm.AzureConfig`
7475
llm_config = lm.OpenAIGPTConfig(
7576
chat_model=model or lm.OpenAIChatModel.GPT4o,
7677
chat_context_length=4096,

examples/basic/oai-asst-chat.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ def chat() -> None:
4343

4444
config = OpenAIAssistantConfig(
4545
system_message=sys_msg,
46-
llm=OpenAIGPTConfig(chat_model=OpenAIChatModel.GPT4), # or GPT4o
46+
llm=OpenAIGPTConfig(chat_model=OpenAIChatModel.GPT4o),
4747
)
4848
agent = OpenAIAssistant(config)
4949
task = Task(agent)
Lines changed: 119 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,119 @@
1+
"""
2+
Agent that uses a Tool to execute python code.
3+
4+
CAUTION - this is a security risk, as it allows arbitrary code execution.
5+
This is a bare-bones example. For a real application, you would want to restrict
6+
the code in various ways, e.g. by using a sandboxed environment, or by restricting
7+
the modules that can be imported.
8+
9+
Run like this (leave model empty to use default GPT4o)
10+
11+
uv run examples/basic/python-code-exec-tool.py -m gpt4o-mini
12+
"""
13+
14+
import io
15+
import contextlib
16+
from fire import Fire
17+
from rich.prompt import Prompt
18+
from langroid.pydantic_v1 import Field
19+
from langroid.agent.tools.orchestration import ResultTool
20+
import langroid as lr
21+
import langroid.language_models as lm
22+
23+
24+
def execute_code(code_string):
25+
"""
26+
A minimal function to execute Python code and capture its output.
27+
28+
Args:
29+
code_string: The Python code to execute
30+
31+
Returns:
32+
Tuple of (output, local_variables)
33+
"""
34+
# Create dictionary for local variables
35+
local_vars = {}
36+
37+
# Capture stdout
38+
buffer = io.StringIO()
39+
40+
# Execute code with stdout redirection
41+
with contextlib.redirect_stdout(buffer):
42+
try:
43+
exec(code_string, globals(), local_vars)
44+
success = True
45+
except Exception as e:
46+
print(f"Error: {str(e)}")
47+
success = False
48+
49+
output = buffer.getvalue()
50+
return output, local_vars, success
51+
52+
53+
class PyCodeTool(lr.ToolMessage):
54+
request: str = "py_code_tool"
55+
purpose: str = "To execute python <code> and return results"
56+
57+
code: str = Field(
58+
...,
59+
description="""
60+
Syntactically valid Python code that can be placed in file to
61+
be run by the Python interpreter. MUST NOT CONTAIN any CODE-BLOCK
62+
delimiters like triple-backticks.
63+
""",
64+
)
65+
66+
def handle(self):
67+
output, local_vars, success = execute_code(self.code)
68+
if success:
69+
print("Successfully ran code. Results:")
70+
print(output)
71+
print("Local variables:")
72+
print(local_vars)
73+
else:
74+
print("Failed to run code.")
75+
return ResultTool(output=output, local_vars=local_vars, success=success)
76+
77+
78+
def main(model: str = ""):
79+
llm_config = lm.OpenAIGPTConfig(
80+
chat_model=model or lm.OpenAIChatModel.GPT4o,
81+
)
82+
agent = lr.ChatAgent(
83+
lr.ChatAgentConfig(
84+
name="Coder",
85+
llm=llm_config,
86+
# handle LLM non-tool msg
87+
handle_llm_no_tool=lambda msg: ResultTool(
88+
output=msg.content,
89+
success=True,
90+
),
91+
system_message=f"""
92+
You are an expert python coder. When you get a user's message,
93+
respond as follows:
94+
- if you think you need to run Python code,
95+
use the TOOL `{PyCodeTool.name()}` to perform the task.
96+
- otherwise simply respond to the user's message.
97+
""",
98+
)
99+
)
100+
agent.enable_message(PyCodeTool)
101+
# task specialized to return ResultTool
102+
# set restart to False to maintain conv history across `run` calls
103+
task = lr.Task(agent, interactive=False, restart=False)[ResultTool]
104+
105+
while True:
106+
user_input = Prompt.ask("User")
107+
if user_input.lower() in ["x", "q"]:
108+
break
109+
result: ResultTool | None = task.run(user_input)
110+
if result is not None:
111+
# code was run; do something with the output if any
112+
if result.success:
113+
print("Output:", result.output)
114+
else:
115+
print("Code execution failed.")
116+
117+
118+
if __name__ == "__main__":
119+
Fire(main)

examples/basic/schedule-extract.py

Lines changed: 27 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -4,21 +4,25 @@
44
Enter vague, unstructured info like:
55
66
M-F 8-3pm at home or Tue/Wed 9-1030am at daycare
7+
8+
Run like this -- (omit the -m arg for default gpt-4o-mini LLM)
9+
10+
```bash
11+
uv run examples/basic/schedule-extract.py -m gpt-4o
```
712
"""
813

914
import langroid as lr
1015
import langroid.language_models as lm
11-
from enum import Enum
1216
from langroid.agent.tools.orchestration import FinalResultTool
13-
from typing import List, Dict, Tuple
17+
from typing import List, Dict, Literal, Tuple
1418
from langroid.pydantic_v1 import BaseModel, Field
1519
from rich.prompt import Prompt
1620
from fire import Fire
1721

1822

1923
class Slot(BaseModel):
20-
start_time: float = Field(..., description="start time of the slot, e.g. 11:30AM")
21-
duration: float = Field(..., description="duration of the slot in MINUTES")
24+
start_time: str = Field(..., description="start time of the slot, e.g. 11:30AM")
25+
end_time: str = Field(..., description="end time of the slot, e.g. 12:30PM")
2226
location: str = Field(..., description="location of the slot or UNKNOWN")
2327

2428

@@ -30,28 +34,19 @@ class DaySchedule(BaseModel):
3034
slots: List[Slot] = Field(..., description="List of time slots for the day")
3135

3236

33-
class Weekday(int, Enum):
34-
"""
35-
A class to represent a weekday.
36-
"""
37-
38-
MON = 0
39-
TUE = 1
40-
WED = 2
41-
THU = 3
42-
FRI = 4
37+
Weekday = Literal["Mon", "Tue", "Wed", "Thu", "Fri"]
4338

4439

4540
class Availability(BaseModel):
4641
"""
4742
A class to represent schedule information.
4843
"""
4944

50-
week_availability: Dict[int, DaySchedule] = Field(
45+
week_availability: Dict[Weekday, DaySchedule] = Field(
5146
...,
5247
description="""
5348
Dictionary mapping weekday to DaySchedule,
54-
where 0 = Monday, 1 = Tuesday, ... 4 = Friday
49+
where weekday is one of "Mon", "Tue", "Wed", "Thu", "Fri"
5550
""",
5651
)
5752

@@ -77,17 +72,27 @@ def examples(cls) -> List["lr.ToolMessage" | Tuple[str, "lr.ToolMessage"]]:
7772
cls(
7873
availabilities=Availability(
7974
week_availability={
80-
Weekday.MON: DaySchedule(
75+
"Mon": DaySchedule(
8176
slots=[
82-
Slot(start_time=10, duration=360, location="home"),
8377
Slot(
84-
start_time=15, duration=60, location="daycare"
78+
start_time="10:00",
79+
end_time="16:00",
80+
location="home",
81+
),
82+
Slot(
83+
start_time="15:00",
84+
end_time="16:00",
85+
location="daycare",
8586
),
8687
]
8788
),
88-
Weekday.WED: DaySchedule(
89+
"Wed": DaySchedule(
8990
slots=[
90-
Slot(start_time=10, duration=360, location="home")
91+
Slot(
92+
start_time="10:00",
93+
end_time="16:00",
94+
location="home",
95+
)
9196
]
9297
),
9398
}
@@ -110,7 +115,7 @@ def handle(self) -> str:
110115

111116
def make_schedule_task(model: str = ""):
112117
llm_config = lm.OpenAIGPTConfig(
113-
chat_model=model or lm.GeminiModel.GEMINI_2_FLASH_LITE,
118+
chat_model=model or lm.OpenAIChatModel.GPT4o_MINI,
114119
)
115120
agent = lr.ChatAgent(
116121
lr.ChatAgentConfig(

examples/data-qa/sql-chat/sql_chat.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -133,9 +133,6 @@ def main(
133133
tools: bool = typer.Option(
134134
False, "--tools", "-t", help="use langroid tools instead of function-calling"
135135
),
136-
cache_type: str = typer.Option(
137-
"redis", "--cachetype", "-ct", help="redis or momento"
138-
),
139136
schema_tools: bool = typer.Option(
140137
False, "--schema_tools", "-st", help="use schema tools"
141138
),
@@ -145,7 +142,7 @@ def main(
145142
debug=debug,
146143
cache=not nocache,
147144
stream=not no_stream,
148-
cache_type=cache_type,
145+
cache_type="redis",
149146
)
150147
)
151148
print("[blue]Welcome to the SQL database chatbot!\n")
@@ -198,7 +195,7 @@ def main(
198195
use_schema_tools=schema_tools,
199196
addressing_prefix=SEND_TO,
200197
llm=OpenAIGPTConfig(
201-
chat_model=OpenAIChatModel.GPT4,
198+
chat_model=OpenAIChatModel.GPT4o,
202199
),
203200
)
204201
agent = SQLChatAgent(agent_config)

examples/docqa/chat.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,7 @@ def main(
8585
config = DocChatAgentConfig(
8686
llm=llm_config,
8787
n_query_rephrases=0,
88+
full_citations=True,
8889
hypothetical_answer=False,
8990
# how many sentences in each segment, for relevance-extraction:
9091
# increase this if you find that relevance extraction is losing context
@@ -136,7 +137,6 @@ def main(
136137
case "lance" | "lancedb":
137138
config.vecdb = lr.vector_store.LanceDBConfig(
138139
collection_name="doc-chat-lancedb",
139-
replace_collection=True,
140140
storage_path=".lancedb/data/",
141141
embedding=embed_cfg,
142142
)

0 commit comments

Comments
 (0)