diff --git a/examples/function_calling_agent.json b/examples/function_calling_agent.json
index db7cd0c..7e37c2a 100644
--- a/examples/function_calling_agent.json
+++ b/examples/function_calling_agent.json
@@ -2,13 +2,13 @@
   "llama_generation_settings": {
     "temperature": 0.65,
     "top_k": 40,
-    "top_p": 0.5,
+    "top_p": 0.95,
     "min_p": 0.05,
     "n_predict": -1,
     "n_keep": 0,
     "stream": true,
     "stop_sequences": [],
-    "tfs_z": 0.975,
+    "tfs_z": 1.0,
     "typical_p": 1.0,
     "repeat_penalty": 1.1,
     "repeat_last_n": 64,
@@ -22,25 +22,29 @@
     "seed": -1,
     "ignore_eos": false
   },
-  "system_prompt": "You are an advanced AI assistant. You are interacting with the user and with your environment by calling functions. You call functions by writing JSON objects, which represent specific function calls.\nBelow is a list of your available function calls:\n\nFunction: send_message_to_user\n  Description: Send a message to the user.\n  Parameters:\n    message (str):\n      Description: The message send to the user.\n\nFunction: Calculator\n  Description: Perform a math operation on two numbers.\n  Parameters:\n    number_one (any):\n      Description: First number.\n    operation (math-operation):\n      Description: Math operation to perform.\n    number_two (any):\n      Description: Second number.\n\nFunction: write_to_file\n  Description: Write file to the user filesystem.\n  Parameters:\n    chain_of_thought (str):\n      Description: Your chain of thought while writing the file.\n    file_path (str):\n      Description: The file path includes the filename and file ending.\n    file_content (str):\n      Description: The actual content to write.\n\nFunction: read_file\n  Description: Read file from the user filesystem.\n  Parameters:\n    file_path (str):\n      Description: The file path includes the filename and file ending.\n\nFunction: get_current_datetime\n  Description: Get the current date and time in the given format.\n  Parameters:\n    output_format (str or none-type):\n      Description: format string for the date and time. Defaults to '%Y-%m-%d %H:%M:%S'\n\nFunction: get_current_weather\n  Parameters:\n    location (str)\n    unit (unit or none-type)\n\n",
+  "system_prompt": "You are an advanced AI assistant. You are interacting with the user and with your environment by calling functions. You call functions by writing JSON objects, which represent specific function calls.\nBelow is a list of your available function calls:\n\nFunction: send_message_to_user\n  Description: Send a message to the user.\n  Parameters:\n    message (str):\n      Description: The message send to the user.\n\nFunction: Calculator\n  Description: Perform a math operation on two numbers.\n  Parameters:\n    number_one (int or float):\n      Description: First number.\n    operation (math-operation):\n      Description: Math operation to perform.\n    number_two (int or float):\n      Description: Second number.\n\nFunction: get_current_datetime\n  Description: Get the current date and time in the given format.\n  Parameters:\n    output_format (str or none-type):\n      Description: formatting string for the date and time, defaults to '%Y-%m-%d %H:%M:%S'\n\nFunction: get_current_weather\n  Parameters:\n    location (str)\n    unit (unit or none-type)\n\n",
   "k_last_messages_from_chat_history": 0,
   "debug_output": true,
   "messages": [
     {
       "role": "user",
-      "content": "Solve the following calculations: 42 * 42, 24 * 24, 5 * 5"
+      "content": "Format the answer clearly: Get the date and time, get the current weather in celsius in London and solve the following calculation: 42 * 42"
     },
     {
       "role": "assistant",
-      "content": "[\n  {\n    \"function\": \"Calculator\",\n    \"params\": {\n      \"number_one\": 42,\n      \"operation\": \"multiply\",\n      \"number_two\": 42\n    }\n  },\n  {\n    \"function\": \"Calculator\",\n    \"params\": {\n      \"number_one\": 24,\n      \"operation\": \"multiply\",\n      \"number_two\": 24\n    }\n  },\n  {\n    \"function\": \"Calculator\",\n    \"params\": {\n      \"number_one\": 5,\n      \"operation\": \"multiply\",\n      \"number_two\": 5\n    }\n  }\n]"
+      "content": "[\n  {\n    \"function\": \"get_current_datetime\",\n    \"params\": {\n      \"output_format\": \"%Y-%m-%d %H:%M:%S\"\n    }\n  },\n  {\n    \"function\": \"get_current_weather\",\n    \"params\": {\n      \"location\": \"London\",\n      \"unit\": \"celsius\"\n    }\n  },\n  {\n    \"function\": \"Calculator\",\n    \"params\": {\n      \"number_one\": 42,\n      \"operation\": \"multiply\",\n      \"number_two\": 42\n    }\n  }\n]"
     },
     {
       "role": "function",
-      "content": "FUNCTION_CALLING_RESULTS:\n1764\n\n576\n\n25"
+      "content": "Results from calling functions:\n2024-01-16 10:11:46\n\n{\"location\": \"London\", \"temperature\": \"42\", \"unit\": \"celsius\"}\n\n1764"
     },
     {
       "role": "assistant",
-      "content": "[\n  {\n    \"function\": \"send_message_to_user\",\n    \"params\": {\n      \"message\": \"1764\"\n    }\n  },\n  {\n    \"function\": \"send_message_to_user\",\n    \"params\": {\n      \"message\": \"576\"\n    }\n  },\n  {\n    \"function\": \"send_message_to_user\",\n    \"params\": {\n      \"message\": \"25\"\n    }\n  }\n]"
+      "content": "[\n  {\n    \"function\": \"send_message_to_user\",\n    \"params\": {\n      \"message\": \"The date and time is 2024-01-16 10:11:46.\"\n    }\n  },\n  {\n    \"function\": \"send_message_to_user\",\n    \"params\": {\n      \"message\": \"The current weather in London is 42 degrees Celsius.\"\n    }\n  },\n  {\n    \"function\": \"send_message_to_user\",\n    \"params\": {\n      \"message\": \"The result of the calculation 42 * 42 is 1764.\"\n    }\n  }\n]"
+    },
+    {
+      "role": "function",
+      "content": "Results from calling functions:\nNone\n\nNone\n\nNone"
     }
   ],
   "custom_messages_formatter": {
@@ -57,7 +61,7 @@
     ],
     "FUNCTION_PROMPT_START": "<|im_start|>function\n",
     "FUNCTION_PROMPT_END": "<|im_end|>\n",
-    "USE_USER_ROLE_FUNCTION_CALL_RESULT": true,
+    "USE_USER_ROLE_FUNCTION_CALL_RESULT": false,
     "STRIP_PROMPT": true
   }
 }
\ No newline at end of file
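Notes on the example change: the reworked conversation exercises three different tools in a single assistant turn (get_current_datetime, get_current_weather, Calculator) instead of three Calculator calls, and it now records a final function message whose results are all None. That trailing "None" message is what the reworked loop in function_calling_agent.py (further down) uses to detect that the last round of send_message_to_user calls returned nothing and the turn is finished. Sampling is also loosened (top_p 0.5 -> 0.95) and tail-free sampling is disabled (tfs_z 1.0 is a no-op), and with USE_USER_ROLE_FUNCTION_CALL_RESULT set to false the results are rendered under a dedicated function role instead of being disguised as user messages.

As a rough sketch of the call convention the example encodes (illustrative only; the tool table and helper names below are invented here, not part of the llama-cpp-agent API):

    import json
    from datetime import datetime


    def get_current_datetime(output_format=None):
        # Default format matches the example's system prompt.
        return datetime.now().strftime(output_format or "%Y-%m-%d %H:%M:%S")


    def send_message_to_user(message):
        print(message)  # Returns None, which the agent loop treats as "done".


    TOOLS = {
        "get_current_datetime": get_current_datetime,
        "send_message_to_user": send_message_to_user,
    }


    def run_function_calls(assistant_output: str) -> list:
        # The model emits a JSON array of {"function": ..., "params": ...} objects;
        # each entry is dispatched and the return values are collected in order.
        calls = json.loads(assistant_output)
        return [TOOLS[call["function"]](**call["params"]) for call in calls]

The collected return values are what end up joined into the next "Results from calling functions:" message.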
diff --git a/pyproject.toml b/pyproject.toml
index 74f315e..544b74c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "llama-cpp-agent"
-version = "0.0.15"
+version = "0.0.16"
 description = "A framework for building LLM based AI agents with llama-cpp-python."
 readme = "ReadMe.md"
 
diff --git a/src/llama_cpp_agent/function_calling_agent.py b/src/llama_cpp_agent/function_calling_agent.py
index ee970e4..e512725 100644
--- a/src/llama_cpp_agent/function_calling_agent.py
+++ b/src/llama_cpp_agent/function_calling_agent.py
@@ -247,11 +247,21 @@ def generate_response(self, message: str):
         """
 
         count = 0
         msg = copy(message)
-        while msg and not ("None" in '\n'.join([str(m) for m in msg])):
+        while msg:
             if count > 0:
-                msg = "FUNCTION_CALLING_RESULTS:\n" + '\n\n'.join([str(m) for m in msg])
-            msg = self.llama_cpp_agent.get_chat_response(msg, role="function", system_prompt=self.system_prompt,
+                msg = "Results from calling functions:\n" + '\n\n'.join([str(m) for m in msg])
+                self.llama_cpp_agent.add_message(role="function", message=msg)
+                lines = msg.splitlines()
+
+                found_none = False
+                for line in lines:
+                    if line.startswith("None"):
+                        found_none = True
+                        break
+                if found_none:
+                    break
+            msg = self.llama_cpp_agent.get_chat_response(system_prompt=self.system_prompt,
                                                          function_tool_registry=self.tool_registry,
                                                          streaming_callback=self.streaming_callback,
                                                          k_last_messages=self.k_last_messages_from_chat_history,
diff --git a/src/llama_cpp_agent/llm_agent.py b/src/llama_cpp_agent/llm_agent.py
index 2145ca4..cc69381 100644
--- a/src/llama_cpp_agent/llm_agent.py
+++ b/src/llama_cpp_agent/llm_agent.py
@@ -86,7 +86,7 @@ def get_function_tool_registry(function_tool_list: List[LlamaCppFunctionTool], a
         function_tool_registry.finalize()
         return function_tool_registry
 
-    def add_message(self, message: str, role: Literal["system"] | Literal["user"] | Literal["assistant"] = "user"):
+    def add_message(self, message: str, role: Literal["system"] | Literal["user"] | Literal["assistant"] | Literal["function"] = "user"):
         """
         Adds a message to the chat history.
 
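The generate_response rewrite changes both how results flow back and how the loop terminates. Before, function results were passed back through get_chat_response(msg, role="function"), and the loop header stopped as soon as the substring "None" appeared anywhere in the joined results, so a tool legitimately returning text containing "None" would have ended the turn early. Now the results message is appended to the chat history explicitly via add_message (whose signature gains the "function" role in llm_agent.py above), and the loop breaks only when a result line starts with "None", i.e. when the last round of calls returned no values, as send_message_to_user does. A condensed sketch of the new control flow, with the model call stubbed out (fake_model_round is invented for the sketch, and the count increment is assumed to happen below the visible hunk):

    from copy import copy


    def fake_model_round(_msg):
        # Stand-in for get_chat_response: returns one round of function-call
        # results; None entries mean a call (e.g. send_message_to_user)
        # produced no value to feed back.
        return [None, None, None]


    def generate_response(message):
        count = 0
        msg = copy(message)
        while msg:
            if count > 0:
                msg = "Results from calling functions:\n" + "\n\n".join(str(m) for m in msg)
                # The real method also appends msg to the chat history here.
                if any(line.startswith("None") for line in msg.splitlines()):
                    break  # Last round of calls returned no values -> turn is done.
            msg = fake_model_round(msg)
            count += 1  # Assumed; the increment sits outside the hunk shown above.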
diff --git a/src/llama_cpp_agent/messages_formatter.py b/src/llama_cpp_agent/messages_formatter.py
index ff415e3..4b1de8f 100644
--- a/src/llama_cpp_agent/messages_formatter.py
+++ b/src/llama_cpp_agent/messages_formatter.py
@@ -170,14 +170,14 @@ def format_messages(self, messages: List[Dict[str, str]]) -> Tuple[str, str]:
                 last_role = "assistant"
             elif message["role"] == "function":
                 if isinstance(message["content"], list):
-                    message["content"] = "FUNCTION_CALLING_RESULTS:\n" + '\n\n'.join([str(m) for m in message["content"]])
+                    message["content"] = "Results from calling functions:\n" + '\n'.join([str(m) for m in message["content"]])
                 if self.USE_USER_ROLE_FUNCTION_CALL_RESULT:
                     formatted_messages += self.USER_PROMPT_START + message["content"] + self.USER_PROMPT_END
                     last_role = "user"
                 else:
                     formatted_messages += self.FUNCTION_PROMPT_START + message["content"] + self.FUNCTION_PROMPT_END
                     last_role = "function"
-        if last_role == "system" or last_role == "user":
+        if last_role == "system" or last_role == "user" or last_role == "function":
             if self.STRIP_PROMPT:
                 return formatted_messages + self.ASSISTANT_PROMPT_START.strip(), "assistant"
             else:
@@ -240,7 +240,7 @@ def as_dict(self) -> dict:
                                      ASSISTANT_PROMPT_END_MIXTRAL, True, DEFAULT_MIXTRAL_STOP_SEQUENCES)
 chatml_formatter = MessagesFormatter("", SYS_PROMPT_START_CHATML, SYS_PROMPT_END_CHATML, USER_PROMPT_START_CHATML,
                                      USER_PROMPT_END_CHATML, ASSISTANT_PROMPT_START_CHATML,
-                                     ASSISTANT_PROMPT_END_CHATML, False, DEFAULT_CHATML_STOP_SEQUENCES, True, FUNCTION_PROMPT_START_CHATML, FUNCTION_PROMPT_END_CHATML)
+                                     ASSISTANT_PROMPT_END_CHATML, False, DEFAULT_CHATML_STOP_SEQUENCES, False, FUNCTION_PROMPT_START_CHATML, FUNCTION_PROMPT_END_CHATML)
 vicuna_formatter = MessagesFormatter("", SYS_PROMPT_START_VICUNA, SYS_PROMPT_END_VICUNA, USER_PROMPT_START_VICUNA,
                                      USER_PROMPT_END_VICUNA, ASSISTANT_PROMPT_START_VICUNA,
                                      ASSISTANT_PROMPT_END_VICUNA, False, DEFAULT_VICUNA_STOP_SEQUENCES)
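The formatter side mirrors the agent changes: list-valued function results are joined under the "Results from calling functions:" header (note the join here is '\n' while function_calling_agent.py uses '\n\n', harmless but slightly inconsistent), a function-role message now also triggers the trailing assistant prefix so the model answers immediately after tool output, and the stock chatml_formatter flips USE_USER_ROLE_FUNCTION_CALL_RESULT to False to use the dedicated function role by default. A small sketch of the prompt this produces (the FUNCTION_* values appear verbatim in the example JSON above; the assistant marker is assumed to be the standard ChatML one):

    # ChatML markers; the FUNCTION_* strings are taken from the example JSON,
    # ASSISTANT_PROMPT_START is an assumption for this sketch.
    FUNCTION_PROMPT_START = "<|im_start|>function\n"
    FUNCTION_PROMPT_END = "<|im_end|>\n"
    ASSISTANT_PROMPT_START = "<|im_start|>assistant\n"


    def render_function_result(results: list) -> str:
        content = "Results from calling functions:\n" + "\n".join(str(m) for m in results)
        # last_role == "function" now falls into the same branch as "system"/"user",
        # so the (stripped) assistant prefix is appended and generation continues
        # directly after the tool output.
        return (FUNCTION_PROMPT_START + content + FUNCTION_PROMPT_END
                + ASSISTANT_PROMPT_START.strip())


    print(render_function_result(["2024-01-16 10:11:46", "1764"]))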