|
| 1 | +""" |
Function-calling example using a local or remote LLM, e.g. via ollama or groq.
| 3 | +
|
| 4 | +"Function-calling" refers to the ability of the LLM to generate |
| 5 | +a structured response, typically a JSON object, instead of a plain text response, |
| 6 | +which is then interpreted by your code to perform some action. |
| 7 | +This is also referred to in various scenarios as "Tools", "Actions" or "Plugins". |
| 8 | +See more here: https://langroid.github.io/langroid/quick-start/chat-agent-tool/ |
| 9 | +
|
| 10 | +Run like this (to run with llama-3.1-8b-instant via groq): |
| 11 | +
|
| 12 | +python3 examples/basic/text-to-structured.py -m groq/llama-3.1-8b-instant |
| 13 | +
|
| 14 | +Other models to try it with: |
| 15 | +- ollama/qwen2.5-coder |
| 16 | +- ollama/qwen2.5 |
| 17 | +
|
| 18 | +
|
| 19 | +See here for how to set up a Local LLM to work with Langroid: |
| 20 | +https://langroid.github.io/langroid/tutorials/local-llm-setup/ |
| 21 | +
|
| 22 | +
|
| 23 | +""" |
| 24 | + |
| 25 | +import os |
| 26 | +from typing import List, Literal |
| 27 | +import fire |
| 28 | +import json |
| 29 | +from rich.prompt import Prompt |
| 30 | + |
| 31 | +from langroid.pydantic_v1 import BaseModel, Field |
| 32 | +import langroid as lr |
| 33 | +from langroid.utils.configuration import settings |
| 34 | +from langroid.agent.tool_message import ToolMessage |
| 35 | +from langroid.agent.tools.orchestration import ResultTool |
| 36 | +import langroid.language_models as lm |
| 37 | + |
# Default chat model, used when no -m flag is given; "for best results".
DEFAULT_LLM = lm.OpenAIChatModel.GPT4o

# Presumably set to silence the HuggingFace tokenizers fork/parallelism
# warning — NOTE(review): confirm this is needed for the models used here.
os.environ["TOKENIZERS_PARALLELISM"] = "false"
| 42 | + |
| 43 | +# (1) Define the desired structure via Pydantic. |
| 44 | +# The "Field" annotations are optional, and are included in the system message |
| 45 | +# if provided, and help with generation accuracy. |
| 46 | + |
| 47 | + |
class Wifi(BaseModel):
    """A single wifi network found in the user's text."""

    # network name — presumably the SSID; confirm against callers
    name: str
| 50 | + |
| 51 | + |
class HomeSettings(BaseModel):
    """Structured home-automation settings to be extracted from free text.

    The Field descriptions are included in the system message sent to the
    LLM and help with generation accuracy.
    """

    # NOTE(review): `App` is not snake_case, but renaming it would change the
    # JSON schema the LLM is instructed to produce, so it is left as-is.
    App: List[str] = Field(..., description="List of apps found in text")
    wifi: List[Wifi] = Field(..., description="List of wifi networks found in text")
    brightness: Literal["low", "medium", "high"] = Field(
        ..., description="Brightness level found in text"
    )
| 58 | + |
| 59 | + |
# (2) Define the Tool class for the LLM to use, to produce the above structure.
class HomeAutomationTool(lr.agent.ToolMessage):
    """Tool to extract Home Automation structure from text."""

    request: str = "home_automation_tool"
    purpose: str = """
    To extract <home_settings> structure from a given text.
    """
    home_settings: HomeSettings = Field(
        ..., description="Home Automation settings from given text"
    )

    # FIX: return annotation was `-> str`, but this method returns a
    # ResultTool (see the final statement); the wrong annotation misleads
    # readers and type checkers.
    def handle(self) -> ResultTool:
        """Handle the LLM's structured output when it matches this tool.

        Prints the validated settings and returns a ResultTool, so that a
        task specialized as ``Task(...)[ResultTool]`` terminates and returns
        this result to the caller.

        Returns:
            ResultTool: carries the validated HomeSettings as ``settings``.
        """
        print(
            f"""
            SUCCESS! Got Valid Home Automation Settings:
            {json.dumps(self.home_settings.dict(), indent=2)}
            """
        )
        return ResultTool(settings=self.home_settings)

    @classmethod
    def examples(cls) -> List["ToolMessage"]:
        """Few-shot examples inserted into the system prompt.

        Each example is a (thought, tool-instance) pair: the string is the
        "reasoning" shown alongside the generated tool call.
        """
        return [
            (
                """
                I have extracted apps Spotify and Netflix,
                wifi HomeWifi, and brightness medium
                """,
                cls(
                    home_settings=HomeSettings(
                        App=["Spotify", "Netflix"],
                        wifi=[Wifi(name="HomeWifi")],
                        brightness="medium",
                    )
                ),
            )
        ]
| 100 | + |
| 101 | + |
def app(
    m: str = DEFAULT_LLM,  # model
    d: bool = False,  # pass -d to enable debug mode (see prompts etc)
    nc: bool = False,  # pass -nc to disable cache-retrieval (i.e. get fresh answers)
):
    """Run an interactive loop extracting home-automation settings from text.

    Args:
        m: chat model identifier; falls back to DEFAULT_LLM when empty.
        d: enable debug mode (show prompts etc).
        nc: disable cache retrieval so answers are freshly generated.
    """
    settings.debug = d
    settings.cache = not nc

    # (3) Configure the LLM; context length / timeout depend on the model.
    llm_config = lm.OpenAIGPTConfig(
        chat_model=m or DEFAULT_LLM,
        chat_context_length=4096,  # set this based on model
        max_output_tokens=100,
        temperature=0.2,
        stream=True,
        timeout=45,
    )

    tool_request = HomeAutomationTool.default_value("request")
    agent_config = lr.ChatAgentConfig(
        llm=llm_config,
        system_message=f"""
        You are an expert in extracting home automation settings from text.
        When user gives a piece of text, use the TOOL `{tool_request}`
        to present the extracted structured information.
        """,
    )
    extractor = lr.ChatAgent(agent_config)

    # (4) Enable the Tool for this agent --> this auto-inserts JSON instructions
    # and few-shot examples (specified in the tool defn above) into the system message
    extractor.enable_message(HomeAutomationTool)

    # (5) Create task and run it to start an interactive loop.
    # Specializing with [ResultTool] makes task.run() return a ResultTool.
    task = lr.Task(extractor, interactive=False)[ResultTool]

    # Keep extracting until the user enters nothing, "q", or "x".
    while (
        text := Prompt.ask("[blue]Enter text (or q/x to exit)")
    ) and text.lower() not in ("x", "q"):
        result = task.run(text)
        assert isinstance(result, ResultTool)
        assert isinstance(result.settings, HomeSettings)
| 147 | + |
| 148 | + |
# Script entry point: fire exposes `app`'s parameters as CLI flags
# (e.g. -m, -d, -nc).
if __name__ == "__main__":
    fire.Fire(app)