Skip to content

Commit 5bd6608

Browse files
committed
examples: schedule-extract.py update
1 parent 7358c50 commit 5bd6608

16 files changed

+2722
-316
lines changed

examples/basic/chat-azure-client.py

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,17 @@
22
Example showing how to use Langroid with Azure OpenAI and Entra ID
33
authentication by providing a custom client.
44
5+
NOTE: this example is ONLY meant for those who are trying to use a custom
6+
Azure client, as in this scenario:
7+
https://langroid.github.io/langroid/notes/custom-azure-client/
8+
This is NOT TYPICAL for most users, and should be ignored if you are not using such a
9+
custom client.
10+
11+
For typical usage of Azure-deployed models with Langroid, see
12+
the [`test_azure_openai.py`](https://github.com/langroid/langroid/blob/main/tests/main/test_azure_openai.py) and
13+
[`example/basic/chat.py`](https://github.com/langroid/langroid/blob/main/examples/basic/chat.py)
14+
15+
516
For an async version of this example, see chat-azure-async-client.py.
617
718
For more details see here:

examples/basic/chat.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,7 @@ def main(
7171
load_dotenv()
7272

7373
# use the appropriate config instance depending on model name
74+
# NOTE: when using Azure, change this to `lm.AzureConfig`
7475
llm_config = lm.OpenAIGPTConfig(
7576
chat_model=model or lm.OpenAIChatModel.GPT4o,
7677
chat_context_length=4096,
Lines changed: 119 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,119 @@
1+
"""
2+
Agent that uses a Tool to execute python code.
3+
4+
CAUTION - this is a security risk, as it allows arbitrary code execution.
5+
This is a bare-bones example. For a real application, you would want to restrict
6+
the code in various ways, e.g. by using a sandboxed environment, or by restricting
7+
the modules that can be imported.
8+
9+
Run like this (leave model empty to use default GPT4o)
10+
11+
uv run examples/basic/python-code-exec-tool.py -m gpt4o-mini
12+
"""
13+
14+
import io
15+
import contextlib
16+
from fire import Fire
17+
from rich.prompt import Prompt
18+
from langroid.pydantic_v1 import Field
19+
from langroid.agent.tools.orchestration import ResultTool
20+
import langroid as lr
21+
import langroid.language_models as lm
22+
23+
24+
def execute_code(code_string):
    """
    Execute Python code and capture its stdout output.

    CAUTION: uses `exec`, which runs arbitrary code — only use with
    trusted input or inside a sandboxed environment.

    Args:
        code_string: The Python code to execute.

    Returns:
        Tuple of (output, local_vars, success), where:
        - output: everything the code printed to stdout; if the code raised,
          this also contains an "Error: ..." line,
        - local_vars: dict of variables the executed code defined,
        - success: True if the code ran without raising an exception.
    """
    # Dictionary that collects variables defined by the executed code
    local_vars = {}

    # Capture stdout
    buffer = io.StringIO()

    # Execute code with stdout redirection
    with contextlib.redirect_stdout(buffer):
        try:
            exec(code_string, globals(), local_vars)
            success = True
        except Exception as e:
            # Surface the error in the captured output instead of raising,
            # so the caller (an LLM tool handler) can report it gracefully.
            print(f"Error: {str(e)}")
            success = False

    output = buffer.getvalue()
    return output, local_vars, success
51+
52+
53+
class PyCodeTool(lr.ToolMessage):
    """Tool that lets the LLM submit Python code for execution.

    CAUTION: executing LLM-generated code is a security risk; see the
    module docstring.
    """

    request: str = "py_code_tool"
    purpose: str = "To execute python <code> and return results"

    code: str = Field(
        ...,
        description="""
        Syntactically valid Python code that can be placed in file to
        be run by the Python interpreter. MUST NOT CONTAIN any CODE-BLOCK
        delimiters like triple-backticks.
        """,
    )

    def handle(self):
        """Run `self.code`; report results and return them as a ResultTool."""
        output, local_vars, success = execute_code(self.code)
        if success:
            print("Successfully ran code. Results:")
            print(output)
            print("Local variables:")
            print(local_vars)
        else:
            print("Failed to run code.")
            # Fix: previously the captured error message in `output` was
            # discarded here; show it so the user sees why the code failed.
            print(output)
        return ResultTool(output=output, local_vars=local_vars, success=success)
76+
77+
78+
def main(model: str = ""):
    """Run an interactive loop with a Python-coding agent.

    Args:
        model: LLM model name; defaults to GPT-4o when empty.
    """
    llm_config = lm.OpenAIGPTConfig(
        chat_model=model or lm.OpenAIChatModel.GPT4o,
    )
    # When the LLM replies without a tool call, wrap its text in a
    # ResultTool so the task still yields a typed result.
    agent_config = lr.ChatAgentConfig(
        name="Coder",
        llm=llm_config,
        handle_llm_no_tool=lambda msg: ResultTool(
            output=msg.content,
            success=True,
        ),
        system_message=f"""
        You are an expert python coder. When you get a user's message,
        respond as follows:
        - if you think you need to run Python code,
          use the TOOL `{PyCodeTool.name()}` to perform the task.
        - otherwise simply respond to the user's message.
        """,
    )
    agent = lr.ChatAgent(agent_config)
    agent.enable_message(PyCodeTool)
    # Specialize the task to return a ResultTool; restart=False keeps the
    # conversation history across successive `run` calls.
    task = lr.Task(agent, interactive=False, restart=False)[ResultTool]

    # Simple REPL; entering "x" or "q" exits.
    while (query := Prompt.ask("User")).lower() not in ("x", "q"):
        result: ResultTool | None = task.run(query)
        if result is None:
            continue
        # code was run; do something with the output if any
        if result.success:
            print("Output:", result.output)
        else:
            print("Code execution failed.")


if __name__ == "__main__":
    Fire(main)

examples/basic/schedule-extract.py

Lines changed: 27 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -4,21 +4,25 @@
44
Enter vague, unstructured info like:
55
66
M-F 8-3pm at home or Tue/Wed 9-1030am at daycare
7+
8+
Run like this -- (omit the -m arg for default gpt-4o-mini LLM)
9+
10+
```bash
11+
uv run examples/basic/schedule-extract.py -m gpt-4o
```
712
"""
813

914
import langroid as lr
1015
import langroid.language_models as lm
11-
from enum import Enum
1216
from langroid.agent.tools.orchestration import FinalResultTool
13-
from typing import List, Dict, Tuple
17+
from typing import List, Dict, Literal, Tuple
1418
from langroid.pydantic_v1 import BaseModel, Field
1519
from rich.prompt import Prompt
1620
from fire import Fire
1721

1822

1923
class Slot(BaseModel):
20-
start_time: float = Field(..., description="start time of the slot, e.g. 11:30AM")
21-
duration: float = Field(..., description="duration of the slot in MINUTES")
24+
start_time: str = Field(..., description="start time of the slot, e.g. 11:30AM")
25+
end_time: str = Field(..., description="end time of the slot, e.g. 12:30PM")
2226
location: str = Field(..., description="location of the slot or UNKNOWN")
2327

2428

@@ -30,28 +34,19 @@ class DaySchedule(BaseModel):
3034
slots: List[Slot] = Field(..., description="List of time slots for the day")
3135

3236

33-
class Weekday(int, Enum):
34-
"""
35-
A class to represent a weekday.
36-
"""
37-
38-
MON = 0
39-
TUE = 1
40-
WED = 2
41-
THU = 3
42-
FRI = 4
37+
Weekday = Literal["Mon", "Tue", "Wed", "Thu", "Fri"]
4338

4439

4540
class Availability(BaseModel):
4641
"""
4742
A class to represent schedule information.
4843
"""
4944

50-
week_availability: Dict[int, DaySchedule] = Field(
45+
week_availability: Dict[Weekday, DaySchedule] = Field(
5146
...,
5247
description="""
5348
Dictionary mapping weekday to DaySchedule,
54-
where 0 = Monday, 1 = Tuesday, ... 4 = Friday
49+
where weekday is one of "Mon", "Tue", "Wed", "Thu", "Fri"
5550
""",
5651
)
5752

@@ -77,17 +72,27 @@ def examples(cls) -> List["lr.ToolMessage" | Tuple[str, "lr.ToolMessage"]]:
7772
cls(
7873
availabilities=Availability(
7974
week_availability={
80-
Weekday.MON: DaySchedule(
75+
"Mon": DaySchedule(
8176
slots=[
82-
Slot(start_time=10, duration=360, location="home"),
8377
Slot(
84-
start_time=15, duration=60, location="daycare"
78+
start_time="10:00",
79+
end_time="16:00",
80+
location="home",
81+
),
82+
Slot(
83+
start_time="15:00",
84+
end_time="16:00",
85+
location="daycare",
8586
),
8687
]
8788
),
88-
Weekday.WED: DaySchedule(
89+
"Wed": DaySchedule(
8990
slots=[
90-
Slot(start_time=10, duration=360, location="home")
91+
Slot(
92+
start_time="10:00",
93+
end_time="16:00",
94+
location="home",
95+
)
9196
]
9297
),
9398
}
@@ -110,7 +115,7 @@ def handle(self) -> str:
110115

111116
def make_schedule_task(model: str = ""):
112117
llm_config = lm.OpenAIGPTConfig(
113-
chat_model=model or lm.GeminiModel.GEMINI_2_FLASH_LITE,
118+
chat_model=model or lm.OpenAIChatModel.GPT4o_MINI,
114119
)
115120
agent = lr.ChatAgent(
116121
lr.ChatAgentConfig(

examples/docqa/chat.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,7 @@ def main(
8585
config = DocChatAgentConfig(
8686
llm=llm_config,
8787
n_query_rephrases=0,
88+
full_citations=True,
8889
hypothetical_answer=False,
8990
# how many sentences in each segment, for relevance-extraction:
9091
# increase this if you find that relevance extraction is losing context

examples/docqa/chat_search.py

Lines changed: 56 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -16,13 +16,16 @@
1616
-f: use OpenAI functions api instead of tools
1717
-m <model_name>: run with a specific LLM
1818
(defaults to GPT4-Turbo if blank)
19+
-c <crawler_name>: specify a crawler to use for web search. Options are:
20+
"trafilatura" (default), "firecrawl", "exa"
1921
2022
See here for guide to using local LLMs with Langroid:
2123
https://langroid.github.io/langroid/tutorials/local-llm-setup/
2224
"""
2325

26+
import typer
2427
import re
25-
from typing import List, Any
28+
from typing import List, Any, Optional
2629

2730
from rich import print
2831
from rich.prompt import Prompt
@@ -41,6 +44,11 @@
4144
from langroid.utils.constants import NO_ANSWER
4245
from langroid.utils.configuration import set_global, Settings
4346
from fire import Fire
47+
from langroid.parsing.url_loader import (
48+
TrafilaturaConfig,
49+
FirecrawlConfig,
50+
ExaCrawlerConfig,
51+
)
4452

4553

4654
class RelevantExtractsTool(ToolMessage):
@@ -85,6 +93,26 @@ def instructions(cls) -> str:
8593

8694
class SearchDocChatAgent(DocChatAgent):
8795
tried_vecdb: bool = False
96+
crawler: Optional[str] = None
97+
98+
def __init__(self, config: DocChatAgentConfig, crawler: Optional[str] = None):
99+
super().__init__(config)
100+
self.tried_vecdb = False
101+
self.crawler = crawler
102+
self.update_crawler_config(crawler)
103+
104+
def update_crawler_config(self, crawler: Optional[str]):
105+
"""Updates the crawler config based on the crawler argument."""
106+
if crawler == "firecrawl":
107+
self.config.crawler_config = FirecrawlConfig()
108+
elif crawler == "trafilatura" or crawler is None:
109+
self.config.crawler_config = TrafilaturaConfig()
110+
elif crawler == "exa":
111+
self.config.crawler_config = ExaCrawlerConfig()
112+
else:
113+
raise ValueError(
114+
f"Unsupported crawler {crawler}. Options are: 'trafilatura', 'firecrawl'"
115+
)
88116

89117
def llm_response(
90118
self,
@@ -127,12 +155,32 @@ def cli():
127155
Fire(main)
128156

129157

158+
app = typer.Typer()
159+
160+
161+
@app.command()
130162
def main(
131163
debug: bool = False,
132164
nocache: bool = False,
133165
model: str = "",
134166
fn_api: bool = True,
167+
crawler: Optional[str] = typer.Option(
168+
None,
169+
"--crawler",
170+
"-c",
171+
help="Specify a crawler to use (trafilatura, firecrawl)",
172+
),
135173
) -> None:
174+
"""
175+
Main function to run the chatbot.
176+
177+
Args:
178+
debug (bool): Enable debug mode.
179+
nocache (bool): Disable caching.
180+
model (str): Specify the LLM model to use.
181+
fn_api (bool): Use OpenAI functions API instead of tools.
182+
crawler (str): Specify the crawler to use for web search.
183+
"""
136184

137185
set_global(
138186
Settings(
@@ -169,7 +217,7 @@ def main(
169217
# "ollama/llama2"
170218
# "local/localhost:8000/v1"
171219
# "local/localhost:8000"
172-
chat_context_length=2048, # adjust based on model
220+
chat_context_length=8000, # adjust based on model
173221
)
174222

175223
config = DocChatAgentConfig(
@@ -188,7 +236,7 @@ def main(
188236
3. If you are still unable to answer, you can use the `relevant_search_extracts`
189237
tool/function-call to get some text from a web search. Once you receive the
190238
text, you can use it to answer my question.
191-
4. If you still can't answer, simply say {NO_ANSWER}
239+
5. If you still can't answer, simply say {NO_ANSWER}
192240
193241
Remember to always FIRST try `relevant_extracts` to see if there are already
194242
any relevant docs, before trying web-search with `relevant_search_extracts`.
@@ -204,7 +252,7 @@ def main(
204252
""",
205253
)
206254

207-
agent = SearchDocChatAgent(config)
255+
agent = SearchDocChatAgent(config, crawler=crawler)
208256
agent.enable_message(RelevantExtractsTool)
209257
agent.enable_message(RelevantSearchExtractsTool)
210258
collection_name = Prompt.ask(
@@ -225,8 +273,10 @@ def main(
225273
agent.vecdb.set_collection(collection_name, replace=replace)
226274

227275
task = Task(agent, interactive=False)
228-
task.run("Can you help me answer some questions, possibly using web search?")
276+
task.run(
277+
"Can you help me answer some questions, possibly using web search and crawling?"
278+
)
229279

230280

231281
if __name__ == "__main__":
232-
Fire(main)
282+
app()

0 commit comments

Comments
 (0)