From e368f7cbdc5d47443ad4df5b0c19f01a0f4ca480 Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Sat, 11 May 2024 15:37:17 +0100 Subject: [PATCH 01/16] Updated command line --- README.md | 7 ++++++- documentation/source/configuration.rst | 3 +-- documentation/source/running_WAFL.rst | 7 ++++++- license.txt | 2 +- todo.txt | 6 +++--- wafl/command_line.py | 10 ++++++++-- wafl/runners/run_web_interface.py | 4 ++-- wafl/variables.py | 2 +- 8 files changed, 28 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index b1996ad3..afd5d450 100644 --- a/README.md +++ b/README.md @@ -73,6 +73,11 @@ Running WAFL This document contains a few examples of how to use the `wafl` CLI. There are four modes in which to run the system + +### $ wafl run +Starts all the available interfaces of the chatbot at the same time. + + ## $ wafl run-audio This is the main mode of operation. It will run the system in a loop, waiting for the user to speak a command. @@ -82,7 +87,7 @@ The default name is "computer", but you can change it to whatever you want. ## $ wafl run-server -It runs a local web server that listens for HTTP requests on port 8889. +It runs a local web server that listens for HTTP requests on port 8090. The server will act as a chatbot, executing commands and returning the result as defined in the rules. diff --git a/documentation/source/configuration.rst b/documentation/source/configuration.rst index b2d4a730..a1722f00 100644 --- a/documentation/source/configuration.rst +++ b/documentation/source/configuration.rst @@ -49,8 +49,7 @@ These settings regulate the following: * "frontend_port" is the port where the web frontend is running. The default is 8090. - * "llm_model" is the configuration to connect to the LLM model in the backend. The default is "localhost:8080". - The "temperature" parameter is used to set the temperature for the LLM model. The default is 0.4. + * "llm_model" is the configuration to connect to wafl-llm in the backend. The default url is "localhost:8080". The "temperature" parameter is used to set the temperature for the LLM model. The default is 0.4. * "listener_model" is the configuration to connect to the listener model in the backend. The default is "localhost:8080". diff --git a/documentation/source/running_WAFL.rst b/documentation/source/running_WAFL.rst index 7cfc03c0..c8e0c10b 100644 --- a/documentation/source/running_WAFL.rst +++ b/documentation/source/running_WAFL.rst @@ -3,6 +3,11 @@ Running WAFL This document contains a few examples of how to use the `wafl` CLI. There are four modes in which to run the system +$ wafl run +---------- +Starts all the available interfaces of the chatbot at the same time. + + $ wafl run-audio ---------------- @@ -14,7 +19,7 @@ The default name is "computer", but you can change it to whatever you want. $ wafl run-server ----------------- -It runs a local web server that listens for HTTP requests on port 8889. +It runs a local web server that listens for HTTP requests on port 8090. The server will act as a chatbot, executing commands and returning the result as defined in the rules. 
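As a quick sanity check of the server mode documented above, here is a minimal illustrative sketch; it assumes a folder already initialized with `wafl init`, the default `frontend_port` of 8090, and the `/create_new_instance` route defined in `wafl/runners/run_web_interface.py` later in this patch:

```bash
# Start the web server; it listens on the configured frontend_port (8090 by default)
$ wafl run-server

# From another shell, ask the server to create a new conversation instance.
# The route name matches the Flask handler in run_web_interface.py.
$ curl -X POST http://localhost:8090/create_new_instance
```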
diff --git a/license.txt b/license.txt index 76d24377..1eac25f7 100644 --- a/license.txt +++ b/license.txt @@ -1,4 +1,4 @@ -Copyright (c) 2023 alberto.cetoli@fractalego.io +Copyright (c) 2024 alberto.cetoli@fractalego.io Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: diff --git a/todo.txt b/todo.txt index 3d726a32..cf0025b1 100644 --- a/todo.txt +++ b/todo.txt @@ -6,9 +6,9 @@ /* let user decide port for frontend /* update docs about port /* push new version -* update pypi with wafl and wafl-llm -* clean code for llm eval and make it public -* update huggingface readme +/* update pypi with wafl and wafl-llm +/* clean code for llm eval and make it public +/* update huggingface readme * read overleaf paper diff --git a/wafl/command_line.py b/wafl/command_line.py index fc68db06..edd79372 100644 --- a/wafl/command_line.py +++ b/wafl/command_line.py @@ -11,13 +11,15 @@ ) from wafl.runners.run_from_actions import run_action from wafl.runners.run_from_audio import run_from_audio -from wafl.runners.run_web_interface import run_app +from wafl.runners.run_web_and_audio_interface import run_app +from wafl.runners.run_web_interface import run_server_only_app def print_help(): print("\n") print("These are the available commands:") print("> wafl init: Initialize the current folder") + print("> wafl run: Starts all the available interfaces of the chatbot at the same time") print("> wafl run-cli: Run a cli version of the chatbot") print("> wafl run-audio: Run a voice-powered version of the chatbot") print("> wafl run-server: Run a webserver version of the chatbot") @@ -44,6 +46,10 @@ def process_cli(): create_initial_files() download_models() + if command == "run": + run_app() + remove_preprocessed("/") + elif command == "run-cli": run_from_command_line() remove_preprocessed("/") @@ -53,7 +59,7 @@ def process_cli(): remove_preprocessed("/") elif command == "run-server": - run_app() + run_server_only_app() remove_preprocessed("/") elif command == "run-tests": diff --git a/wafl/runners/run_web_interface.py b/wafl/runners/run_web_interface.py index ac285396..eae0b696 100644 --- a/wafl/runners/run_web_interface.py +++ b/wafl/runners/run_web_interface.py @@ -19,7 +19,7 @@ _logger = LocalFileLogger() -def run_app(): +def run_server_only_app(): @app.route("/create_new_instance", methods=["POST"]) def create_new_instance(): conversation_id = random.randint(0, sys.maxsize) @@ -61,4 +61,4 @@ def create_scheduler_and_webserver_loop(conversation_id): if __name__ == "__main__": - run_app() + run_server_only_app() diff --git a/wafl/variables.py b/wafl/variables.py index 1802c460..49e98fe7 100644 --- a/wafl/variables.py +++ b/wafl/variables.py @@ -1,6 +1,6 @@ def get_variables(): return { - "version": "0.0.83", + "version": "0.0.84", } From dd0deda30a82498f3ff8021a176371b59ceca7bb Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Wed, 15 May 2024 16:54:26 +0100 Subject: [PATCH 02/16] change documentation about initialization --- documentation/source/installation.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/documentation/source/installation.rst b/documentation/source/installation.rst index 362a1398..ea5a25a2 
100644 --- a/documentation/source/installation.rst +++ b/documentation/source/installation.rst @@ -24,9 +24,10 @@ After installing the requirements, you can initialize the interface by running t .. code-block:: bash $ wafl init + $ wafl run -which creates a `config.json` file that you can edit to change the default settings. -A standard rule file is also created as `wafl.rules`. +The first command creates a set of template files, including a configuration in `config.json` that you can edit to change the default settings. +The second command starts the audio interface as well as a web server on port 8090 by default. Please see the examples in the following chapters. From 62a3f242074efb884eca386438de8aca52601768 Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Fri, 17 May 2024 08:16:07 +0100 Subject: [PATCH 03/16] removed Docker instructions --- README.md | 7 ------- documentation/source/installation.rst | 7 ------- 2 files changed, 14 deletions(-) diff --git a/README.md b/README.md index afd5d450..8b95a194 100644 --- a/README.md +++ b/README.md @@ -56,13 +56,6 @@ wafl-llm start ``` which will use the default models and start the server on port 8080. -#### Docker -A docker image can be used to run it as in the following: - -```bash -$ docker run -p8080:8080 --env NVIDIA_DISABLE_REQUIRE=1 --gpus all fractalego/wafl-llm:0.80 -``` - The interface side has a `config.json` file that needs to be filled with the IP address of the LLM side. The default is localhost. Alternatively, you can run the LLM side by cloning [this repository](https://github.com/fractalego/wafl-llm). diff --git a/documentation/source/installation.rst b/documentation/source/installation.rst index ea5a25a2..b6a794a5 100644 --- a/documentation/source/installation.rst +++ b/documentation/source/installation.rst @@ -43,13 +43,6 @@ In order to quickly run the LLM side, you can use the following installation com which will use the default models and start the server on port 8080. -Alternatively, a Docker image can be used to run it as in the following: - -.. code-block:: bash - - $ docker run -p8080:8080 --env NVIDIA_DISABLE_REQUIRE=1 --gpus all fractalego/wafl-llm:0.80 - - The interface side has a `config.json` file that needs to be filled with the IP address of the LLM side. The default is localhost. 
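To make the initialization flow described above concrete, here is a minimal sketch of the two commands together with an abridged, illustrative `config.json`; the exact set of keys may vary between releases, but the values shown match the defaults mentioned in the documentation (frontend on port 8090, LLM and listener backends on localhost:8080, activation word "computer"):

```bash
$ wafl init
$ cat config.json
{
    "waking_up_word": "computer",
    "frontend_port": 8090,
    "llm_model": {
        "model_host": "localhost",
        "model_port": 8080,
        "temperature": 0.4
    },
    "listener_model": {
        "model_host": "localhost",
        "model_port": 8080
    }
}
$ wafl run   # starts the audio interface and a web server on port 8090
```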
From 1caf68ed30a155d54311ebbc7bc64c8e697c39e6 Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Fri, 17 May 2024 09:23:33 +0100 Subject: [PATCH 04/16] Added prompt and utterances dataclasses --- wafl/answerer/dialogue_answerer.py | 14 ++++++++++--- wafl/command_line.py | 4 +++- wafl/connectors/prompt.py | 22 +++++++++++++++++++++ wafl/interface/conversation.py | 20 +++++++++++++++++++ wafl/runners/run_web_and_audio_interface.py | 5 ++++- wafl/runners/run_web_interface.py | 5 ++++- wafl/variables.py | 2 +- 7 files changed, 65 insertions(+), 7 deletions(-) create mode 100644 wafl/connectors/prompt.py create mode 100644 wafl/interface/conversation.py diff --git a/wafl/answerer/dialogue_answerer.py b/wafl/answerer/dialogue_answerer.py index e4b1a160..3f8eb2bf 100644 --- a/wafl/answerer/dialogue_answerer.py +++ b/wafl/answerer/dialogue_answerer.py @@ -160,7 +160,11 @@ def _init_python_module(self, module_name): self._functions = [item[0] for item in getmembers(self._module, isfunction)] async def _substitute_results_in_answer(self, answer_text): - matches = re.finditer(r"(.*?)|(.*?\))$", answer_text, re.DOTALL|re.MULTILINE) + matches = re.finditer( + r"(.*?)|(.*?\))$", + answer_text, + re.DOTALL | re.MULTILINE, + ) for match in matches: to_execute = match.group(1) if not to_execute: @@ -168,7 +172,9 @@ async def _substitute_results_in_answer(self, answer_text): result = await self._run_code(to_execute) answer_text = answer_text.replace(match.group(0), result) - matches = re.finditer(r"(.*?\))$", answer_text, re.DOTALL|re.MULTILINE) + matches = re.finditer( + r"(.*?\))$", answer_text, re.DOTALL | re.MULTILINE + ) for match in matches: to_execute = match.group(1) if not to_execute: @@ -188,7 +194,9 @@ async def _substitute_memory_in_answer_and_get_memories_if_present( answer_text = answer_text.replace(match.group(0), "") memories.append(to_execute) - matches = re.finditer(r"(.*?\))$", answer_text, re.DOTALL|re.MULTILINE) + matches = re.finditer( + r"(.*?\))$", answer_text, re.DOTALL | re.MULTILINE + ) memories = [] for match in matches: to_execute = match.group(1) diff --git a/wafl/command_line.py b/wafl/command_line.py index edd79372..94ec8a1c 100644 --- a/wafl/command_line.py +++ b/wafl/command_line.py @@ -19,7 +19,9 @@ def print_help(): print("\n") print("These are the available commands:") print("> wafl init: Initialize the current folder") - print("> wafl run: Starts all the available interfaces of the chatbot at the same time") + print( + "> wafl run: Starts all the available interfaces of the chatbot at the same time" + ) print("> wafl run-cli: Run a cli version of the chatbot") print("> wafl run-audio: Run a voice-powered version of the chatbot") print("> wafl run-server: Run a webserver version of the chatbot") diff --git a/wafl/connectors/prompt.py b/wafl/connectors/prompt.py new file mode 100644 index 00000000..40e7d90d --- /dev/null +++ b/wafl/connectors/prompt.py @@ -0,0 +1,22 @@ +from dataclasses import dataclass + + +@dataclass +class Prompt: + system_prompt: str = "" + conversation: "Conversation" = None + + def to_dict(self): + return { + "system_prompt": self.system_prompt, + "conversation": self.conversation.to_dict(), + } + + +class PrompCreator: + @staticmethod + def create(system_prompt: str, conversation: "Conversation") -> Prompt: + prompt = Prompt() + prompt.system_prompt = system_prompt + prompt.conversation = conversation + return prompt diff --git a/wafl/interface/conversation.py b/wafl/interface/conversation.py new file mode 100644 index 00000000..de794e3d --- 
/dev/null +++ b/wafl/interface/conversation.py @@ -0,0 +1,20 @@ +from dataclasses import dataclass +from typing import List + + +@dataclass +class Utterance: + text: str + speaker: str + timestamp: str + + def to_dict(self): + return {"text": self.text, "speaker": self.speaker, "timestamp": self.timestamp} + + +@dataclass +class Conversation: + utterances: List[Utterance] = [] + + def to_dict(self): + return {"utterances": [utterance.to_dict() for utterance in self.utterances]} diff --git a/wafl/runners/run_web_and_audio_interface.py b/wafl/runners/run_web_and_audio_interface.py index 7f705db9..0c22c092 100644 --- a/wafl/runners/run_web_and_audio_interface.py +++ b/wafl/runners/run_web_and_audio_interface.py @@ -55,7 +55,10 @@ def create_scheduler_and_webserver_loop(conversation_id): "web_server_loop": web_loop, } - app.run(host="0.0.0.0", port=Configuration.load_local_config().get_value("frontend_port")) + app.run( + host="0.0.0.0", + port=Configuration.load_local_config().get_value("frontend_port"), + ) if __name__ == "__main__": diff --git a/wafl/runners/run_web_interface.py b/wafl/runners/run_web_interface.py index eae0b696..1ad3d142 100644 --- a/wafl/runners/run_web_interface.py +++ b/wafl/runners/run_web_interface.py @@ -57,7 +57,10 @@ def create_scheduler_and_webserver_loop(conversation_id): "web_server_loop": web_loop, } - app.run(host="0.0.0.0", port=Configuration.load_local_config().get_value("frontend_port")) + app.run( + host="0.0.0.0", + port=Configuration.load_local_config().get_value("frontend_port"), + ) if __name__ == "__main__": diff --git a/wafl/variables.py b/wafl/variables.py index 49e98fe7..0eee6ad9 100644 --- a/wafl/variables.py +++ b/wafl/variables.py @@ -6,4 +6,4 @@ def get_variables(): def is_supported(wafl_llm_version): supported_versions = ["0.0.82", "0.0.83"] - return wafl_llm_version in supported_versions \ No newline at end of file + return wafl_llm_version in supported_versions From a892c839449d76cbe7042205d3622d0eb4ec6971 Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Fri, 17 May 2024 13:49:46 +0100 Subject: [PATCH 05/16] Changed loop -> handler, bridge -> client --- tests/test_connection.py | 2 -- tests/test_voice.py | 1 - wafl/answerer/dialogue_answerer.py | 4 ++-- wafl/connectors/{bridges => clients}/__init__.py | 0 .../clients_implementation.py} | 0 .../llm_chitchat_answer_client.py} | 2 +- wafl/runners/run_from_audio.py | 4 ++-- wafl/runners/run_web_and_audio_interface.py | 9 ++++----- wafl/runners/run_web_interface.py | 8 ++++---- .../{conversation_loop.py => conversation_handler.py} | 2 +- ...enerated_event_loop.py => generated_event_handler.py} | 2 +- wafl/scheduler/{web_loop.py => web_handler.py} | 2 +- 12 files changed, 16 insertions(+), 20 deletions(-) rename wafl/connectors/{bridges => clients}/__init__.py (100%) rename wafl/connectors/{bridges/bridge_implementation.py => clients/clients_implementation.py} (100%) rename wafl/connectors/{bridges/llm_chitchat_answer_bridge.py => clients/llm_chitchat_answer_client.py} (97%) rename wafl/scheduler/{conversation_loop.py => conversation_handler.py} (99%) rename wafl/scheduler/{generated_event_loop.py => generated_event_handler.py} (95%) rename wafl/scheduler/{web_loop.py => web_handler.py} (99%) diff --git a/tests/test_connection.py b/tests/test_connection.py index d81daf20..854c223d 100644 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -3,9 +3,7 @@ from unittest import TestCase from wafl.config import Configuration -from wafl.connectors.bridges.llm_chitchat_answer_bridge import 
LLMChitChatAnswerBridge from wafl.connectors.remote.remote_llm_connector import RemoteLLMConnector -from wafl.speaker.fairseq_speaker import FairSeqSpeaker _path = os.path.dirname(__file__) diff --git a/tests/test_voice.py b/tests/test_voice.py index 6a95cba8..4c07d77e 100644 --- a/tests/test_voice.py +++ b/tests/test_voice.py @@ -5,7 +5,6 @@ from unittest import TestCase from wafl.config import Configuration -from wafl.interface.voice_interface import VoiceInterface from wafl.events.conversation_events import ConversationEvents from wafl.interface.dummy_interface import DummyInterface from wafl.listener.whisper_listener import WhisperListener diff --git a/wafl/answerer/dialogue_answerer.py b/wafl/answerer/dialogue_answerer.py index 3f8eb2bf..c71da1bc 100644 --- a/wafl/answerer/dialogue_answerer.py +++ b/wafl/answerer/dialogue_answerer.py @@ -11,7 +11,7 @@ ) from wafl.answerer.base_answerer import BaseAnswerer from wafl.answerer.rule_maker import RuleMaker -from wafl.connectors.bridges.llm_chitchat_answer_bridge import LLMChitChatAnswerBridge +from wafl.connectors.clients.llm_chitchat_answer_client import LLMChitChatAnswerClient from wafl.exceptions import CloseConversation from wafl.extractors.dataclasses import Query, Answer from wafl.simple_text_processing.questions import is_question @@ -20,7 +20,7 @@ class DialogueAnswerer(BaseAnswerer): def __init__(self, config, knowledge, interface, code_path, logger): self._delete_current_rule = "" - self._bridge = LLMChitChatAnswerBridge(config) + self._bridge = LLMChitChatAnswerClient(config) self._knowledge = knowledge self._logger = logger self._interface = interface diff --git a/wafl/connectors/bridges/__init__.py b/wafl/connectors/clients/__init__.py similarity index 100% rename from wafl/connectors/bridges/__init__.py rename to wafl/connectors/clients/__init__.py diff --git a/wafl/connectors/bridges/bridge_implementation.py b/wafl/connectors/clients/clients_implementation.py similarity index 100% rename from wafl/connectors/bridges/bridge_implementation.py rename to wafl/connectors/clients/clients_implementation.py diff --git a/wafl/connectors/bridges/llm_chitchat_answer_bridge.py b/wafl/connectors/clients/llm_chitchat_answer_client.py similarity index 97% rename from wafl/connectors/bridges/llm_chitchat_answer_bridge.py rename to wafl/connectors/clients/llm_chitchat_answer_client.py index 88449c68..e9e08f66 100644 --- a/wafl/connectors/bridges/llm_chitchat_answer_bridge.py +++ b/wafl/connectors/clients/llm_chitchat_answer_client.py @@ -5,7 +5,7 @@ _path = os.path.dirname(__file__) -class LLMChitChatAnswerBridge: +class LLMChitChatAnswerClient: def __init__(self, config): self._connector = LLMConnectorFactory.get_connector(config) self._config = config diff --git a/wafl/runners/run_from_audio.py b/wafl/runners/run_from_audio.py index ddd5fa53..c8889c51 100644 --- a/wafl/runners/run_from_audio.py +++ b/wafl/runners/run_from_audio.py @@ -2,7 +2,7 @@ from wafl.events.conversation_events import ConversationEvents from wafl.interface.voice_interface import VoiceInterface from wafl.logger.local_file_logger import LocalFileLogger -from wafl.scheduler.conversation_loop import ConversationLoop +from wafl.scheduler.conversation_handler import ConversationHandler from wafl.scheduler.scheduler import Scheduler _logger = LocalFileLogger() @@ -16,7 +16,7 @@ def run_from_audio(): interface=interface, logger=_logger, ) - conversation_loop = ConversationLoop( + conversation_loop = ConversationHandler( interface, conversation_events, _logger, diff --git 
a/wafl/runners/run_web_and_audio_interface.py b/wafl/runners/run_web_and_audio_interface.py index 0c22c092..ed01a6e8 100644 --- a/wafl/runners/run_web_and_audio_interface.py +++ b/wafl/runners/run_web_and_audio_interface.py @@ -7,14 +7,13 @@ from wafl.interface.list_interface import ListInterface from wafl.interface.voice_interface import VoiceInterface from wafl.scheduler.scheduler import Scheduler -from wafl.scheduler.web_loop import WebLoop -from wafl.scheduler.conversation_loop import ConversationLoop +from wafl.scheduler.conversation_handler import ConversationHandler from wafl.logger.local_file_logger import LocalFileLogger from wafl.events.conversation_events import ConversationEvents from wafl.interface.queue_interface import QueueInterface from wafl.config import Configuration from wafl.runners.routes import get_app, add_new_rules - +from wafl.scheduler.web_handler import WebHandler app = get_app() _logger = LocalFileLogger() @@ -43,13 +42,13 @@ def create_scheduler_and_webserver_loop(conversation_id): interface=interface, logger=_logger, ) - conversation_loop = ConversationLoop( + conversation_loop = ConversationHandler( interface, conversation_events, _logger, activation_word=config.get_value("waking_up_word"), ) - web_loop = WebLoop(interface, conversation_id, conversation_events) + web_loop = WebHandler(interface, conversation_id, conversation_events) return { "scheduler": Scheduler([conversation_loop, web_loop]), "web_server_loop": web_loop, diff --git a/wafl/runners/run_web_interface.py b/wafl/runners/run_web_interface.py index 1ad3d142..bb75ddc9 100644 --- a/wafl/runners/run_web_interface.py +++ b/wafl/runners/run_web_interface.py @@ -6,8 +6,8 @@ from flask import render_template, redirect, url_for from wafl.scheduler.scheduler import Scheduler -from wafl.scheduler.web_loop import WebLoop -from wafl.scheduler.conversation_loop import ConversationLoop +from wafl.scheduler.web_handler import WebHandler +from wafl.scheduler.conversation_handler import ConversationHandler from wafl.logger.local_file_logger import LocalFileLogger from wafl.events.conversation_events import ConversationEvents from wafl.interface.queue_interface import QueueInterface @@ -42,7 +42,7 @@ def create_scheduler_and_webserver_loop(conversation_id): interface=interface, logger=_logger, ) - conversation_loop = ConversationLoop( + conversation_loop = ConversationHandler( interface, conversation_events, _logger, @@ -51,7 +51,7 @@ def create_scheduler_and_webserver_loop(conversation_id): deactivate_on_closed_conversation=False, ) asyncio.run(interface.output("Hello. 
How may I help you?")) - web_loop = WebLoop(interface, conversation_id, conversation_events) + web_loop = WebHandler(interface, conversation_id, conversation_events) return { "scheduler": Scheduler([conversation_loop, web_loop]), "web_server_loop": web_loop, diff --git a/wafl/scheduler/conversation_loop.py b/wafl/scheduler/conversation_handler.py similarity index 99% rename from wafl/scheduler/conversation_loop.py rename to wafl/scheduler/conversation_handler.py index 3b9dfe09..ccb4d1a9 100644 --- a/wafl/scheduler/conversation_loop.py +++ b/wafl/scheduler/conversation_handler.py @@ -5,7 +5,7 @@ from wafl.exceptions import CloseConversation -class ConversationLoop: +class ConversationHandler: def __init__( self, interface, diff --git a/wafl/scheduler/generated_event_loop.py b/wafl/scheduler/generated_event_handler.py similarity index 95% rename from wafl/scheduler/generated_event_loop.py rename to wafl/scheduler/generated_event_handler.py index 21d1295d..c3563ce0 100644 --- a/wafl/scheduler/generated_event_loop.py +++ b/wafl/scheduler/generated_event_handler.py @@ -1,7 +1,7 @@ import asyncio -class GeneratedEventLoop: +class GeneratedEventHandler: def __init__(self, interface, events, logger): self._interface = interface self._events = events diff --git a/wafl/scheduler/web_loop.py b/wafl/scheduler/web_handler.py similarity index 99% rename from wafl/scheduler/web_loop.py rename to wafl/scheduler/web_handler.py index 2c16a416..f28b6d82 100644 --- a/wafl/scheduler/web_loop.py +++ b/wafl/scheduler/web_handler.py @@ -9,7 +9,7 @@ _path = os.path.dirname(__file__) -class WebLoop: +class WebHandler: def __init__( self, interface: BaseInterface, From fc0529941e3b0fde04723f69fb6ec131f7070e3a Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Fri, 24 May 2024 12:01:30 +0100 Subject: [PATCH 06/16] refactored last_strings on the wafl_llm side --- tests/test_prompts.py | 26 +++++++++++++++++++ todo.txt | 2 ++ wafl/connectors/base_llm_connector.py | 12 ++++----- .../connectors/remote/remote_llm_connector.py | 3 +-- wafl/connectors/utils.py | 4 +-- wafl/interface/base_interface.py | 4 +-- wafl/interface/conversation.py | 2 +- 7 files changed, 40 insertions(+), 13 deletions(-) create mode 100644 tests/test_prompts.py diff --git a/tests/test_prompts.py b/tests/test_prompts.py new file mode 100644 index 00000000..38491e94 --- /dev/null +++ b/tests/test_prompts.py @@ -0,0 +1,26 @@ +import os +from unittest import TestCase + +from wafl.interface.conversation import Conversation, Utterance +from wafl.connectors.prompt import Prompt, PrompCreator + +_path = os.path.dirname(__file__) + + +class TestPrompts(TestCase): + def test_utterance(self): + utterance = Utterance(text="Hello", speaker="user", timestamp="2022-01-01T00:00:00") + self.assertEqual(utterance.to_dict(), {"text": "Hello", "speaker": "user", "timestamp": "2022-01-01T00:00:00"}) + + def test_conversation(self): + utterance1 = Utterance(text="Hello", speaker="user", timestamp="2022-01-01T00:00:00") + utterance2 = Utterance(text="Hi", speaker="bot", timestamp="2022-01-01T00:00:01") + conversation = Conversation(utterances=[utterance1, utterance2]) + self.assertEqual(conversation.to_dict(), {"utterances": [{"text": "Hello", "speaker": "user", "timestamp": "2022-01-01T00:00:00"}, {"text": "Hi", "speaker": "bot", "timestamp": "2022-01-01T00:00:01"}]}) + + def test_prompt(self): + utterance1 = Utterance(text="Hello", speaker="user", timestamp="2022-01-01T00:00:00") + utterance2 = Utterance(text="Hi", speaker="bot", timestamp="2022-01-01T00:00:01") + 
conversation = Conversation(utterances=[utterance1, utterance2]) + prompt = PrompCreator.create(system_prompt="Hello", conversation=conversation) + self.assertEqual(prompt.to_dict(), {"system_prompt": "Hello", "conversation": {"utterances": [{"text": "Hello", "speaker": "user", "timestamp": "2022-01-01T00:00:00"}, {"text": "Hi", "speaker": "bot", "timestamp": "2022-01-01T00:00:01"}]}}) diff --git a/todo.txt b/todo.txt index cf0025b1..49ae4284 100644 --- a/todo.txt +++ b/todo.txt @@ -1,3 +1,5 @@ +* substitute utterances in base_interface with the conversation class + * add config file for model names - llm model name - whisper model name diff --git a/wafl/connectors/base_llm_connector.py b/wafl/connectors/base_llm_connector.py index 1bd57a63..695715fd 100644 --- a/wafl/connectors/base_llm_connector.py +++ b/wafl/connectors/base_llm_connector.py @@ -15,9 +15,9 @@ class BaseLLMConnector: _num_prediction_tokens = 200 _cache = {} - def __init__(self, last_strings=None): - if not last_strings: - self._last_strings = [ + def __init__(self, important_strings=None): + if not important_strings: + self._important_strings = [ "\nuser", "\nbot", "<|EOS|>", @@ -27,7 +27,7 @@ def __init__(self, last_strings=None): ] else: - self._last_strings = last_strings + self._important_strings = important_strings async def predict(self, prompt: str) -> [str]: raise NotImplementedError @@ -38,10 +38,10 @@ async def generate(self, prompt: str) -> str: text = prompt start = len(text) - text += select_best_answer(await self.predict(text), self._last_strings) + text += select_best_answer(await self.predict(text), self._important_strings) end_set = set() - for item in self._last_strings: + for item in self._important_strings: if "" in item or "" in item: continue diff --git a/wafl/connectors/remote/remote_llm_connector.py b/wafl/connectors/remote/remote_llm_connector.py index e9e3c36e..6cfe9c11 100644 --- a/wafl/connectors/remote/remote_llm_connector.py +++ b/wafl/connectors/remote/remote_llm_connector.py @@ -48,7 +48,6 @@ async def predict( "data": prompt, "temperature": temperature, "num_tokens": num_tokens, - "last_strings": self._last_strings, "num_replicas": num_replicas, } @@ -71,7 +70,7 @@ async def check_connection(self): "data": "test", "temperature": 0.6, "num_tokens": 1, - "last_strings": self._last_strings, + "last_strings": self._important_strings, "num_replicas": self._num_replicas, } try: diff --git a/wafl/connectors/utils.py b/wafl/connectors/utils.py index 6bab650d..b3dcf8d5 100644 --- a/wafl/connectors/utils.py +++ b/wafl/connectors/utils.py @@ -1,6 +1,6 @@ -def select_best_answer(answers, last_strings): +def select_best_answer(answers, important_strings): special_words = ( - last_strings + important_strings + ["", "", "result ="] + ["", "", "", ""] ) diff --git a/wafl/interface/base_interface.py b/wafl/interface/base_interface.py index cb54f27c..0c982f26 100644 --- a/wafl/interface/base_interface.py +++ b/wafl/interface/base_interface.py @@ -1,6 +1,6 @@ import time -from typing import List +from wafl.interface.conversation import Conversation class BaseInterface: @@ -8,7 +8,7 @@ def __init__(self, decorator=None): self._is_listening = True self._choices = [] self._facts = [] - self._utterances = [] + self._utterances = Conversation() #### USE THIS AND CHANGE CODE ACCORDINGLY self._decorator = decorator async def output(self, text: str, silent: bool = False): diff --git a/wafl/interface/conversation.py b/wafl/interface/conversation.py index de794e3d..f9c53a42 100644 --- a/wafl/interface/conversation.py 
+++ b/wafl/interface/conversation.py @@ -14,7 +14,7 @@ def to_dict(self): @dataclass class Conversation: - utterances: List[Utterance] = [] + utterances: List[Utterance] = None def to_dict(self): return {"utterances": [utterance.to_dict() for utterance in self.utterances]} From 94920c78b33b434f7b85293559a45675b5df2fd1 Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Fri, 24 May 2024 17:47:59 +0100 Subject: [PATCH 07/16] Added prompt template to dialogue answerer --- tests/test_prompts.py | 66 ++++++++++++++++--- wafl/answerer/dialogue_answerer.py | 1 - wafl/connectors/base_llm_connector.py | 11 ++-- .../clients/llm_chitchat_answer_client.py | 26 +++++--- .../{prompt.py => prompt_template.py} | 6 +- .../connectors/remote/remote_llm_connector.py | 9 ++- wafl/interface/base_interface.py | 11 ++-- wafl/interface/conversation.py | 15 ++++- 8 files changed, 106 insertions(+), 39 deletions(-) rename wafl/connectors/{prompt.py => prompt_template.py} (86%) diff --git a/tests/test_prompts.py b/tests/test_prompts.py index 38491e94..7369fb34 100644 --- a/tests/test_prompts.py +++ b/tests/test_prompts.py @@ -2,25 +2,73 @@ from unittest import TestCase from wafl.interface.conversation import Conversation, Utterance -from wafl.connectors.prompt import Prompt, PrompCreator +from wafl.connectors.prompt import PromptTemplate, PrompCreator _path = os.path.dirname(__file__) class TestPrompts(TestCase): def test_utterance(self): - utterance = Utterance(text="Hello", speaker="user", timestamp="2022-01-01T00:00:00") - self.assertEqual(utterance.to_dict(), {"text": "Hello", "speaker": "user", "timestamp": "2022-01-01T00:00:00"}) + utterance = Utterance( + text="Hello", speaker="user", timestamp="2022-01-01T00:00:00" + ) + self.assertEqual( + utterance.to_dict(), + {"text": "Hello", "speaker": "user", "timestamp": "2022-01-01T00:00:00"}, + ) def test_conversation(self): - utterance1 = Utterance(text="Hello", speaker="user", timestamp="2022-01-01T00:00:00") - utterance2 = Utterance(text="Hi", speaker="bot", timestamp="2022-01-01T00:00:01") + utterance1 = Utterance( + text="Hello", speaker="user", timestamp="2022-01-01T00:00:00" + ) + utterance2 = Utterance( + text="Hi", speaker="bot", timestamp="2022-01-01T00:00:01" + ) conversation = Conversation(utterances=[utterance1, utterance2]) - self.assertEqual(conversation.to_dict(), {"utterances": [{"text": "Hello", "speaker": "user", "timestamp": "2022-01-01T00:00:00"}, {"text": "Hi", "speaker": "bot", "timestamp": "2022-01-01T00:00:01"}]}) + self.assertEqual( + conversation.to_dict(), + { + "utterances": [ + { + "text": "Hello", + "speaker": "user", + "timestamp": "2022-01-01T00:00:00", + }, + { + "text": "Hi", + "speaker": "bot", + "timestamp": "2022-01-01T00:00:01", + }, + ] + }, + ) def test_prompt(self): - utterance1 = Utterance(text="Hello", speaker="user", timestamp="2022-01-01T00:00:00") - utterance2 = Utterance(text="Hi", speaker="bot", timestamp="2022-01-01T00:00:01") + utterance1 = Utterance( + text="Hello", speaker="user", timestamp="2022-01-01T00:00:00" + ) + utterance2 = Utterance( + text="Hi", speaker="bot", timestamp="2022-01-01T00:00:01" + ) conversation = Conversation(utterances=[utterance1, utterance2]) prompt = PrompCreator.create(system_prompt="Hello", conversation=conversation) - self.assertEqual(prompt.to_dict(), {"system_prompt": "Hello", "conversation": {"utterances": [{"text": "Hello", "speaker": "user", "timestamp": "2022-01-01T00:00:00"}, {"text": "Hi", "speaker": "bot", "timestamp": "2022-01-01T00:00:01"}]}}) + self.assertEqual( + 
prompt.to_dict(), + { + "system_prompt": "Hello", + "conversation": { + "utterances": [ + { + "text": "Hello", + "speaker": "user", + "timestamp": "2022-01-01T00:00:00", + }, + { + "text": "Hi", + "speaker": "bot", + "timestamp": "2022-01-01T00:00:01", + }, + ] + }, + }, + ) diff --git a/wafl/answerer/dialogue_answerer.py b/wafl/answerer/dialogue_answerer.py index c71da1bc..f309da00 100644 --- a/wafl/answerer/dialogue_answerer.py +++ b/wafl/answerer/dialogue_answerer.py @@ -86,7 +86,6 @@ async def answer(self, query_text): original_answer_text = await self._bridge.get_answer( text=facts, dialogue=dialogue_items, - query=rules_texts, ) await self._interface.add_fact(f"The bot predicts: {original_answer_text}") ( diff --git a/wafl/connectors/base_llm_connector.py b/wafl/connectors/base_llm_connector.py index 695715fd..122f6127 100644 --- a/wafl/connectors/base_llm_connector.py +++ b/wafl/connectors/base_llm_connector.py @@ -32,20 +32,17 @@ def __init__(self, important_strings=None): async def predict(self, prompt: str) -> [str]: raise NotImplementedError - async def generate(self, prompt: str) -> str: + async def generate(self, prompt: "PromptTemplate") -> str: if prompt in self._cache: return self._cache[prompt] - text = prompt - start = len(text) - text += select_best_answer(await self.predict(text), self._important_strings) - + text = select_best_answer(await self.predict(prompt), self._important_strings) end_set = set() for item in self._important_strings: if "" in item or "" in item: continue - end_set.add(text.find(item, start)) + end_set.add(text.find(item)) if -1 in end_set: end_set.remove(-1) @@ -54,7 +51,7 @@ async def generate(self, prompt: str) -> str: if end_set: end = min(end_set) - candidate_answer = text[start:end].strip() + candidate_answer = text[:end].strip() candidate_answer = re.sub(r"(.*)<\|.*\|>", r"\1", candidate_answer).strip() if prompt not in self._cache: diff --git a/wafl/connectors/clients/llm_chitchat_answer_client.py b/wafl/connectors/clients/llm_chitchat_answer_client.py index e9e08f66..984464f9 100644 --- a/wafl/connectors/clients/llm_chitchat_answer_client.py +++ b/wafl/connectors/clients/llm_chitchat_answer_client.py @@ -1,6 +1,8 @@ import os +import textwrap from wafl.connectors.factories.llm_connector_factory import LLMConnectorFactory +from wafl.connectors.prompt_template import PromptTemplate _path = os.path.dirname(__file__) @@ -10,12 +12,20 @@ def __init__(self, config): self._connector = LLMConnectorFactory.get_connector(config) self._config = config - async def get_answer(self, text: str, dialogue: str, query: str) -> str: - prompt = await self._get_answer_prompt(text, query, dialogue) + async def get_answer(self, text: str, dialogue: str) -> str: + prompt = await self._get_answer_prompt(text, dialogue) return await self._connector.generate(prompt) - async def _get_answer_prompt(self, text, rules_text, dialogue=None): - prompt = f""" + async def _get_answer_prompt( + self, text: str, dialogue: "Conversation" = None + ) -> PromptTemplate: + return PromptTemplate( + system_prompt=self._get_prompt_text(text), conversation=dialogue + ) + + def _get_prompt_text(self, text): + return textwrap.dedent( + f""" The following is a summary of a conversation. All the elements of the conversation are described briefly: A user is chatting with a bot. The chat is happening through a web interface. The user is typing the messages and the bot is replying. @@ -27,9 +37,5 @@ async def _get_answer_prompt(self, text, rules_text, dialogue=None): Do not repeat yourself. 
Be friendly but not too servile. Wrap any code or html you output in the with the markdown syntax for code blocks (i.e. use triple backticks ```) unless it is between tags. - -This is the dialogue: -{dialogue} -bot: - """.strip() - return prompt +""" + ) diff --git a/wafl/connectors/prompt.py b/wafl/connectors/prompt_template.py similarity index 86% rename from wafl/connectors/prompt.py rename to wafl/connectors/prompt_template.py index 40e7d90d..f45a96e7 100644 --- a/wafl/connectors/prompt.py +++ b/wafl/connectors/prompt_template.py @@ -2,7 +2,7 @@ @dataclass -class Prompt: +class PromptTemplate: system_prompt: str = "" conversation: "Conversation" = None @@ -15,8 +15,8 @@ def to_dict(self): class PrompCreator: @staticmethod - def create(system_prompt: str, conversation: "Conversation") -> Prompt: - prompt = Prompt() + def create(system_prompt: str, conversation: "Conversation") -> PromptTemplate: + prompt = PromptTemplate() prompt.system_prompt = system_prompt prompt.conversation = conversation return prompt diff --git a/wafl/connectors/remote/remote_llm_connector.py b/wafl/connectors/remote/remote_llm_connector.py index 6cfe9c11..12f7ab60 100644 --- a/wafl/connectors/remote/remote_llm_connector.py +++ b/wafl/connectors/remote/remote_llm_connector.py @@ -4,6 +4,7 @@ import asyncio from wafl.connectors.base_llm_connector import BaseLLMConnector +from wafl.connectors.prompt_template import PromptTemplate from wafl.variables import is_supported @@ -33,7 +34,11 @@ def __init__(self, config, last_strings=None, num_replicas=3): raise RuntimeError("Cannot connect a running LLM.") async def predict( - self, prompt: str, temperature=None, num_tokens=None, num_replicas=None + self, + prompt: PromptTemplate, + temperature=None, + num_tokens=None, + num_replicas=None, ) -> [str]: if not temperature: temperature = self._default_temperature @@ -45,7 +50,7 @@ async def predict( num_replicas = self._num_replicas payload = { - "data": prompt, + "data": prompt.to_dict(), "temperature": temperature, "num_tokens": num_tokens, "num_replicas": num_replicas, diff --git a/wafl/interface/base_interface.py b/wafl/interface/base_interface.py index 0c982f26..6ebbf7b6 100644 --- a/wafl/interface/base_interface.py +++ b/wafl/interface/base_interface.py @@ -1,6 +1,6 @@ import time -from wafl.interface.conversation import Conversation +from wafl.interface.conversation import Conversation, Utterance class BaseInterface: @@ -8,7 +8,7 @@ def __init__(self, decorator=None): self._is_listening = True self._choices = [] self._facts = [] - self._utterances = Conversation() #### USE THIS AND CHANGE CODE ACCORDINGLY + self._utterances = Conversation() self._decorator = decorator async def output(self, text: str, silent: bool = False): @@ -70,7 +70,6 @@ def _decorate_reply(self, text: str) -> str: return self._decorator.extract(text, self._utterances) def _insert_utterance(self, speaker, text: str): - if self._utterances == [] or text != self._utterances[-1][1].replace( - f"{speaker}: ", "" - ): - self._utterances.append((time.time(), f"{speaker}: {text}")) + self._utterances.add_utterance( + Utterance(text=text, speaker=speaker, timestamp=time.time()) + ) diff --git a/wafl/interface/conversation.py b/wafl/interface/conversation.py index f9c53a42..984445c4 100644 --- a/wafl/interface/conversation.py +++ b/wafl/interface/conversation.py @@ -6,7 +6,7 @@ class Utterance: text: str speaker: str - timestamp: str + timestamp: float def to_dict(self): return {"text": self.text, "speaker": self.speaker, "timestamp": self.timestamp} @@ 
-16,5 +16,18 @@ def to_dict(self): class Conversation: utterances: List[Utterance] = None + def add_utterance(self, utterance: Utterance): + if self.utterances is None: + self.utterances = [] + + if ( + len(self.utterances) > 0 + and utterance.text == self.utterances[-1].text + and utterance.speaker == self.utterances[-1].speaker + ): + return + + self.utterances.append(utterance) + def to_dict(self): return {"utterances": [utterance.to_dict() for utterance in self.utterances]} From 305b3b35ded8cddd9463fd1f8a5d7bc9767b1a64 Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Sat, 25 May 2024 09:20:30 +0100 Subject: [PATCH 08/16] sending prompt template to the backend --- tests/config.json | 3 +- tests/test_connection.py | 21 ++++++++++---- tests/test_prompts.py | 2 +- .../clients/llm_chitchat_answer_client.py | 28 +++++++++---------- wafl/connectors/prompt_template.py | 15 ++++++++-- .../connectors/remote/remote_llm_connector.py | 5 +++- wafl/interface/conversation.py | 13 ++++++++- 7 files changed, 61 insertions(+), 26 deletions(-) diff --git a/tests/config.json b/tests/config.json index c78c40b3..09c727b1 100644 --- a/tests/config.json +++ b/tests/config.json @@ -7,7 +7,8 @@ "max_recursion": 2, "llm_model": { "model_host": "localhost", - "model_port": 8080 + "model_port": 8080, + "temperature": 0.4 }, "listener_model": { "model_host": "localhost", diff --git a/tests/test_connection.py b/tests/test_connection.py index 854c223d..69181dbd 100644 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -3,6 +3,7 @@ from unittest import TestCase from wafl.config import Configuration +from wafl.connectors.prompt_template import PromptCreator from wafl.connectors.remote.remote_llm_connector import RemoteLLMConnector _path = os.path.dirname(__file__) @@ -14,8 +15,10 @@ def test__connection_to_generative_model_can_generate_text(self): connector = RemoteLLMConnector(config.get_value("llm_model")) prediction = asyncio.run( connector.predict( - 'Generate a full paragraph based on this chapter title "The first contact". ' - "The theme of the paragraph is space opera. " + PromptCreator.create_from_one_instruction( + 'Generate a full paragraph based on this chapter title "The first contact".' + "The theme of the paragraph is space opera. " + ) ) ) assert len(prediction) > 0 @@ -32,8 +35,11 @@ def test__connection_to_generative_model_can_generate_text_within_tags(self): """.strip() - prediction = asyncio.run(connector.predict(prompt)) - print(prediction) + prediction = asyncio.run( + connector.predict( + PromptCreator.create_from_one_instruction(prompt) + ) + ) assert len(prediction) > 0 def test__connection_to_generative_model_can_generate_a_python_list(self): @@ -41,6 +47,9 @@ def test__connection_to_generative_model_can_generate_a_python_list(self): connector = RemoteLLMConnector(config.get_value("llm_model")) connector._num_prediction_tokens = 200 prompt = "Generate a Python list of 4 chapters names for a space opera book. 
The output needs to be a python list of strings: " - prediction = asyncio.run(connector.predict(prompt)) - print(prediction) + prediction = asyncio.run( + connector.predict( + PromptCreator.create_from_one_instruction(prompt) + ) + ) assert len(prediction) > 0 diff --git a/tests/test_prompts.py b/tests/test_prompts.py index 7369fb34..4600dd8b 100644 --- a/tests/test_prompts.py +++ b/tests/test_prompts.py @@ -2,7 +2,7 @@ from unittest import TestCase from wafl.interface.conversation import Conversation, Utterance -from wafl.connectors.prompt import PromptTemplate, PrompCreator +from wafl.connectors.prompt_template import PrompCreator _path = os.path.dirname(__file__) diff --git a/wafl/connectors/clients/llm_chitchat_answer_client.py b/wafl/connectors/clients/llm_chitchat_answer_client.py index 984464f9..b798b8e9 100644 --- a/wafl/connectors/clients/llm_chitchat_answer_client.py +++ b/wafl/connectors/clients/llm_chitchat_answer_client.py @@ -20,22 +20,22 @@ async def _get_answer_prompt( self, text: str, dialogue: "Conversation" = None ) -> PromptTemplate: return PromptTemplate( - system_prompt=self._get_prompt_text(text), conversation=dialogue + system_prompt=self._get_system_prompt(text), conversation=dialogue ) - def _get_prompt_text(self, text): + def _get_system_prompt(self, text): return textwrap.dedent( f""" -The following is a summary of a conversation. All the elements of the conversation are described briefly: - -A user is chatting with a bot. The chat is happening through a web interface. The user is typing the messages and the bot is replying. -{text.strip()} - - - -Create a plausible dialogue based on the aforementioned summary and rules. -Do not repeat yourself. Be friendly but not too servile. -Wrap any code or html you output in the with the markdown syntax for code blocks (i.e. use triple backticks ```) unless it is between tags. - -""" + The following is a summary of a conversation. All the elements of the conversation are described briefly: + + A user is chatting with a bot. The chat is happening through a web interface. The user is typing the messages and the bot is replying. + {text.strip()} + + + + Create a plausible dialogue based on the aforementioned summary and rules. + Do not repeat yourself. Be friendly but not too servile. + Wrap any code or html you output in the with the markdown syntax for code blocks (i.e. use triple backticks ```) unless it is between tags. 
+ + """ ) diff --git a/wafl/connectors/prompt_template.py b/wafl/connectors/prompt_template.py index f45a96e7..ad989766 100644 --- a/wafl/connectors/prompt_template.py +++ b/wafl/connectors/prompt_template.py @@ -1,5 +1,7 @@ from dataclasses import dataclass +from wafl.interface.conversation import Conversation, Utterance + @dataclass class PromptTemplate: @@ -13,10 +15,19 @@ def to_dict(self): } -class PrompCreator: +class PromptCreator: @staticmethod - def create(system_prompt: str, conversation: "Conversation") -> PromptTemplate: + def create(system_prompt: str, conversation: Conversation) -> PromptTemplate: prompt = PromptTemplate() prompt.system_prompt = system_prompt prompt.conversation = conversation return prompt + + @staticmethod + def create_from_one_instruction(instruction: str) -> PromptTemplate: + return PromptTemplate( + system_prompt="", + conversation=Conversation( + utterances=[Utterance(speaker="user", text=instruction)] + ), + ) diff --git a/wafl/connectors/remote/remote_llm_connector.py b/wafl/connectors/remote/remote_llm_connector.py index 12f7ab60..75e55232 100644 --- a/wafl/connectors/remote/remote_llm_connector.py +++ b/wafl/connectors/remote/remote_llm_connector.py @@ -72,7 +72,10 @@ async def predict( async def check_connection(self): payload = { - "data": "test", + "data": { + "system_prompt": "Hello!", + "conversation": [{"speaker": "user", "text": "Hi!"}], + }, "temperature": 0.6, "num_tokens": 1, "last_strings": self._important_strings, diff --git a/wafl/interface/conversation.py b/wafl/interface/conversation.py index 984445c4..72ce8e14 100644 --- a/wafl/interface/conversation.py +++ b/wafl/interface/conversation.py @@ -1,4 +1,5 @@ from dataclasses import dataclass +from datetime import datetime from typing import List @@ -8,6 +9,13 @@ class Utterance: speaker: str timestamp: float + def __init__(self, text: str, speaker: str, timestamp: float = None): + self.text = text + self.speaker = speaker + self.timestamp = timestamp + if self.timestamp is None: + self.timestamp = datetime.now().timestamp() + def to_dict(self): return {"text": self.text, "speaker": self.speaker, "timestamp": self.timestamp} @@ -16,6 +24,9 @@ def to_dict(self): class Conversation: utterances: List[Utterance] = None + def __init__(self, utterances: List[Utterance] = None): + self.utterances = utterances + def add_utterance(self, utterance: Utterance): if self.utterances is None: self.utterances = [] @@ -30,4 +41,4 @@ def add_utterance(self, utterance: Utterance): self.utterances.append(utterance) def to_dict(self): - return {"utterances": [utterance.to_dict() for utterance in self.utterances]} + return [utterance.to_dict() for utterance in self.utterances] From e9ff8e8f58c6b1d0a203c62a99738980ce263c2e Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Sat, 25 May 2024 12:16:26 +0100 Subject: [PATCH 09/16] initial conversation mechanism with phi3 --- tests/test_closing_conversation.py | 2 +- tests/test_connection.py | 9 +- tests/test_prompts.py | 64 +++++----- tests/test_rules.py | 4 +- tests/test_voice.py | 3 +- wafl/answerer/answerer_implementation.py | 18 --- wafl/answerer/dialogue_answerer.py | 120 ++++++++---------- wafl/answerer/entailer.py | 41 +++--- wafl/connectors/base_llm_connector.py | 8 +- .../clients/llm_chitchat_answer_client.py | 39 +++--- wafl/connectors/prompt_template.py | 2 +- .../connectors/remote/remote_llm_connector.py | 2 +- wafl/events/conversation_events.py | 3 +- wafl/interface/base_interface.py | 15 ++- wafl/interface/conversation.py | 67 ++++++++++ 
wafl/interface/dummy_interface.py | 5 +- wafl/interface/queue_interface.py | 4 +- wafl/interface/voice_interface.py | 4 +- wafl/scheduler/messages_creator.py | 19 +-- wafl/templates/functions.py | 2 +- 20 files changed, 222 insertions(+), 209 deletions(-) delete mode 100644 wafl/answerer/answerer_implementation.py diff --git a/tests/test_closing_conversation.py b/tests/test_closing_conversation.py index 2e9fef85..c28c7700 100644 --- a/tests/test_closing_conversation.py +++ b/tests/test_closing_conversation.py @@ -30,7 +30,7 @@ def test__thank_you_closes_conversation(self): ) try: asyncio.run(conversation_events.process_next()) - + print(interface.get_utterances_list()) except CloseConversation: self.assertTrue(True) return diff --git a/tests/test_connection.py b/tests/test_connection.py index 69181dbd..317a91fd 100644 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -36,10 +36,9 @@ def test__connection_to_generative_model_can_generate_text_within_tags(self): """.strip() prediction = asyncio.run( - connector.predict( - PromptCreator.create_from_one_instruction(prompt) - ) + connector.predict(PromptCreator.create_from_one_instruction(prompt)) ) + print(prediction) assert len(prediction) > 0 def test__connection_to_generative_model_can_generate_a_python_list(self): @@ -48,8 +47,6 @@ def test__connection_to_generative_model_can_generate_a_python_list(self): connector._num_prediction_tokens = 200 prompt = "Generate a Python list of 4 chapters names for a space opera book. The output needs to be a python list of strings: " prediction = asyncio.run( - connector.predict( - PromptCreator.create_from_one_instruction(prompt) - ) + connector.predict(PromptCreator.create_from_one_instruction(prompt)) ) assert len(prediction) > 0 diff --git a/tests/test_prompts.py b/tests/test_prompts.py index 4600dd8b..55928c17 100644 --- a/tests/test_prompts.py +++ b/tests/test_prompts.py @@ -1,8 +1,8 @@ import os from unittest import TestCase +from wafl.connectors.prompt_template import PromptCreator from wafl.interface.conversation import Conversation, Utterance -from wafl.connectors.prompt_template import PrompCreator _path = os.path.dirname(__file__) @@ -19,56 +19,52 @@ def test_utterance(self): def test_conversation(self): utterance1 = Utterance( - text="Hello", speaker="user", timestamp="2022-01-01T00:00:00" + text="Hello", speaker="user", timestamp=2 ) utterance2 = Utterance( - text="Hi", speaker="bot", timestamp="2022-01-01T00:00:01" + text="Hi", speaker="bot", timestamp=1 ) conversation = Conversation(utterances=[utterance1, utterance2]) self.assertEqual( conversation.to_dict(), - { - "utterances": [ - { - "text": "Hello", - "speaker": "user", - "timestamp": "2022-01-01T00:00:00", - }, - { - "text": "Hi", - "speaker": "bot", - "timestamp": "2022-01-01T00:00:01", - }, - ] - }, + [ + { + "text": "Hello", + "speaker": "user", + "timestamp": 2, + }, + { + "text": "Hi", + "speaker": "bot", + "timestamp": 1, + }, + ] ) def test_prompt(self): utterance1 = Utterance( - text="Hello", speaker="user", timestamp="2022-01-01T00:00:00" + text="Hello", speaker="user", timestamp=2 ) utterance2 = Utterance( - text="Hi", speaker="bot", timestamp="2022-01-01T00:00:01" + text="Hi", speaker="bot", timestamp=1 ) conversation = Conversation(utterances=[utterance1, utterance2]) - prompt = PrompCreator.create(system_prompt="Hello", conversation=conversation) + prompt = PromptCreator.create(system_prompt="Hello", conversation=conversation) self.assertEqual( prompt.to_dict(), { "system_prompt": "Hello", - 
"conversation": { - "utterances": [ - { - "text": "Hello", - "speaker": "user", - "timestamp": "2022-01-01T00:00:00", - }, - { - "text": "Hi", - "speaker": "bot", - "timestamp": "2022-01-01T00:00:01", - }, - ] - }, + "conversation": [ + { + "text": "Hello", + "speaker": "user", + "timestamp": 2, + }, + { + "text": "Hi", + "speaker": "bot", + "timestamp": 1, + }, + ] }, ) diff --git a/tests/test_rules.py b/tests/test_rules.py index 7be70da6..9a901f51 100644 --- a/tests/test_rules.py +++ b/tests/test_rules.py @@ -39,8 +39,8 @@ def test__rules_can_be_triggered(self): interface=interface, ) asyncio.run(conversation_events.process_next()) - expected = "bot: the horse is tall" - self.assertEqual(expected, interface.get_utterances_list()[-1]) + expected = "The horse is tall" + self.assertIn(expected, interface.get_utterances_list()[-1]) def test__rules_are_not_always_triggered(self): interface = DummyInterface( diff --git a/tests/test_voice.py b/tests/test_voice.py index 4c07d77e..6ecd785b 100644 --- a/tests/test_voice.py +++ b/tests/test_voice.py @@ -30,7 +30,8 @@ def test__activation(self): interface.activate() asyncio.run(conversation_events.process_next(activation_word="computer")) asyncio.run(conversation_events.process_next(activation_word="computer")) - assert interface.get_utterances_list()[-1] == "bot: I hear you" + print(interface.get_utterances_list()) + assert "I hear you" in interface.get_utterances_list()[-1] def test__no_activation(self): interface = DummyInterface(to_utter=["my name is bob"]) diff --git a/wafl/answerer/answerer_implementation.py b/wafl/answerer/answerer_implementation.py deleted file mode 100644 index 0bd2d446..00000000 --- a/wafl/answerer/answerer_implementation.py +++ /dev/null @@ -1,18 +0,0 @@ -def get_last_bot_utterances(dialogue_items, num_utterances): - utterances = [] - for item in reversed(dialogue_items): - if item[1].startswith("bot:"): - utterances.append(item[1].replace("bot:", "").strip()) - - if len(utterances) == num_utterances: - break - - return utterances - - -def get_last_user_utterance(dialogue_items): - for item in reversed(dialogue_items): - if item[1].startswith("user:"): - return item[1].replace("user:", "").strip() - - return "" diff --git a/wafl/answerer/dialogue_answerer.py b/wafl/answerer/dialogue_answerer.py index f309da00..a0231361 100644 --- a/wafl/answerer/dialogue_answerer.py +++ b/wafl/answerer/dialogue_answerer.py @@ -1,19 +1,14 @@ import re -import time import traceback from importlib import import_module from inspect import getmembers, isfunction - -from wafl.answerer.answerer_implementation import ( - get_last_bot_utterances, - get_last_user_utterance, -) from wafl.answerer.base_answerer import BaseAnswerer from wafl.answerer.rule_maker import RuleMaker from wafl.connectors.clients.llm_chitchat_answer_client import LLMChitChatAnswerClient from wafl.exceptions import CloseConversation from wafl.extractors.dataclasses import Query, Answer +from wafl.interface.conversation import Conversation, Utterance from wafl.simple_text_processing.questions import is_question @@ -29,13 +24,13 @@ def __init__(self, config, knowledge, interface, code_path, logger): self._max_num_past_utterances_for_rules = 0 self._prior_facts_with_timestamp = [] self._init_python_module(code_path.replace(".py", "")) - self._prior_rule_with_timestamp = None + self._prior_rules = None self._max_predictions = 3 self._rule_creator = RuleMaker( knowledge, config, interface, - max_num_rules=1, + max_num_rules=2, delete_current_rule=self._delete_current_rule, ) @@ 
-44,48 +39,37 @@ async def answer(self, query_text): self._logger.write(f"Dialogue Answerer: the query is {query_text}") query = Query.create_from_text("The user says: " + query_text) - rules_texts = await self._get_relevant_rules(query) - dialogue = self._interface.get_utterances_list_with_timestamp()[ - -self._max_num_past_utterances : - ] - start_time = -1 - if dialogue: - start_time = dialogue[0][0] - - if not dialogue: - dialogue = [(time.time(), f"user: {query_text}")] - - dialogue_items = dialogue - dialogue_items = sorted(dialogue_items, key=lambda x: x[0]) - if rules_texts: - last_timestamp = dialogue_items[-1][0] - self._prior_rule_with_timestamp = (last_timestamp, rules_texts) - dialogue_items = self._insert_rule_into_dialogue_items( - rules_texts, last_timestamp, dialogue_items + rules_text = await self._get_relevant_rules(query) + conversation = self._interface.get_utterances_list_with_timestamp().get_last_n( + self._max_num_past_utterances + ) + if not conversation: + conversation = Conversation( + [ + Utterance( + query_text, + "user", + ) + ] ) - elif self._prior_rule_with_timestamp: - last_timestamp = self._prior_rule_with_timestamp[0] - rules_texts = self._prior_rule_with_timestamp[1] - dialogue_items = self._insert_rule_into_dialogue_items( - rules_texts, last_timestamp, dialogue_items - ) + last_bot_utterances = conversation.get_last_speaker_utterances("bot", 3) + last_user_utterance = conversation.get_last_speaker_utterances("user", 1) + if not last_user_utterance: + last_user_utterance = query_text - last_bot_utterances = get_last_bot_utterances(dialogue_items, num_utterances=3) - last_user_utterance = get_last_user_utterance(dialogue_items) - dialogue_items = [item[1] for item in dialogue_items if item[0] >= start_time] - conversational_timestamp = len(dialogue_items) + conversational_timestamp = len(conversation) facts = await self._get_relevant_facts( query, - has_prior_rules=bool(rules_texts), + has_prior_rules=bool(rules_text), conversational_timestamp=conversational_timestamp, ) - dialogue_items = "\n".join(dialogue_items) for _ in range(self._max_predictions): original_answer_text = await self._bridge.get_answer( text=facts, - dialogue=dialogue_items, + rules_text=rules_text, + dialogue=conversation, ) await self._interface.add_fact(f"The bot predicts: {original_answer_text}") ( @@ -95,19 +79,36 @@ async def answer(self, query_text): await self._substitute_results_in_answer(original_answer_text) ) if answer_text in last_bot_utterances: - dialogue_items = last_user_utterance + conversation = Conversation( + [ + Utterance( + last_user_utterance[-1], + "user", + ) + ] + ) continue if self._delete_current_rule in answer_text: - self._prior_rule_with_timestamp = None - dialogue_items += f"\n{original_answer_text}" + self._prior_rules = None + conversation.add_utterance( + Utterance( + original_answer_text, + "bot", + ) + ) continue if not memories: break facts += "\n" + "\n".join(memories) - dialogue_items += f"\nbot: {original_answer_text}" + conversation.add_utterance( + Utterance( + original_answer_text, + "bot", + ) + ) if self._logger: self._logger.write(f"Answer within dialogue: The answer is {answer_text}") @@ -146,13 +147,15 @@ async def _get_relevant_facts( "The bot can answer the question while informing the user that the answer was not retrieved" ) - if has_prior_rules: - memory += f"\nThe bot tries to answer {query.text} following the rules from the user." 
-
         return memory
 
     async def _get_relevant_rules(self, query):
-        return await self._rule_creator.create_from_query(query)
+        rules = await self._rule_creator.create_from_query(query)
+        if not rules and self._prior_rules:
+            rules = self._prior_rules
+        self._prior_rules = rules
+        return rules
+
     def _init_python_module(self, module_name):
         self._module = import_module(module_name)
 
@@ -239,26 +242,3 @@ async def _run_code(self, to_execute):
             result = f"\n```python\n{to_execute}\n```"
 
         return result
-
-    def _insert_rule_into_dialogue_items(
-        self, rules_texts, rule_timestamp, dialogue_items
-    ):
-        new_dialogue_items = []
-        already_inserted = False
-        for timestamp, utterance in dialogue_items:
-            if (
-                not already_inserted
-                and utterance.startswith("user:")
-                and rule_timestamp == timestamp
-            ):
-                new_dialogue_items.append(
-                    (
-                        rule_timestamp,
-                        f"user: I want you to follow these rules:\n{rules_texts}",
-                    )
-                )
-                already_inserted = True
-
-            new_dialogue_items.append((timestamp, utterance))
-
-        return new_dialogue_items
diff --git a/wafl/answerer/entailer.py b/wafl/answerer/entailer.py
index 0daad8f3..97bd4297 100644
--- a/wafl/answerer/entailer.py
+++ b/wafl/answerer/entailer.py
@@ -1,6 +1,9 @@
 import os
+import textwrap
 
 from wafl.connectors.factories.llm_connector_factory import LLMConnectorFactory
+from wafl.connectors.prompt_template import PromptTemplate
+from wafl.interface.conversation import Utterance, Conversation
 
 _path = os.path.dirname(__file__)
 
@@ -10,37 +13,31 @@ def __init__(self, config):
         self._connector = LLMConnectorFactory.get_connector(config)
         self._config = config
 
-    async def left_entails_right(self, lhs: str, rhs: str, dialogue: str) -> str:
+    async def left_entails_right(self, lhs: str, rhs: str, dialogue) -> str:
         prompt = await self._get_answer_prompt(lhs, rhs, dialogue)
         result = await self._connector.generate(prompt)
         result = self._clean_result(result)
         return result == "yes"
 
     async def _get_answer_prompt(self, lhs, rhs, dialogue):
-        prompt = f"""
-
-This is a conversation between two agents ("bot" and "user"):
-bot: what can I do for you?
-
-Given this dialogue, the task is to determine whether the following two utterances have the same meaning:
-1) user: I need to book a flight to Paris.
-2) user: I'd like to buy a plane ticket to paris.
-Please answer "yes" or "no": yes
-
-
-
-This is a conversation between two agents ("bot" and "user"):
-{dialogue}
-
-Given this dialogue, the task is to determine whether the following two utterances have the same meaning:
-1) {lhs.lower()}
-2) {rhs.lower()}
-Please answer "yes" or "no":
-        """.strip()
-        return prompt
+        return PromptTemplate(
+            system_prompt="",
+            # arguments reordered to match the signature of _get_dialogue_prompt below
+            conversation=self._get_dialogue_prompt(dialogue, lhs, rhs),
+        )
 
     def _clean_result(self, result):
         result = result.replace("", "")
         result = result.split("\n")[0]
         result = result.strip()
         return result.lower()
+
+    def _get_dialogue_prompt(self, dialogue, lhs, rhs):
+        text = f"""
+Your task is to determine whether two sentences are similar.
+1) {lhs.lower()} +2) {rhs.lower()} +Please answer "yes" if the two sentences are similar or "no" if not: + """.strip() + return Conversation( + [Utterance(speaker="user", text=text)] + ) diff --git a/wafl/connectors/base_llm_connector.py b/wafl/connectors/base_llm_connector.py index 122f6127..832f98c1 100644 --- a/wafl/connectors/base_llm_connector.py +++ b/wafl/connectors/base_llm_connector.py @@ -33,8 +33,8 @@ async def predict(self, prompt: str) -> [str]: raise NotImplementedError async def generate(self, prompt: "PromptTemplate") -> str: - if prompt in self._cache: - return self._cache[prompt] + if str(prompt.to_dict()) in self._cache: + return self._cache[str(prompt.to_dict())] text = select_best_answer(await self.predict(prompt), self._important_strings) end_set = set() @@ -54,8 +54,8 @@ async def generate(self, prompt: "PromptTemplate") -> str: candidate_answer = text[:end].strip() candidate_answer = re.sub(r"(.*)<\|.*\|>", r"\1", candidate_answer).strip() - if prompt not in self._cache: - self._cache[prompt] = candidate_answer + if str(prompt.to_dict()) not in self._cache: + self._cache[str(prompt.to_dict())] = candidate_answer if not candidate_answer: candidate_answer = "unknown" diff --git a/wafl/connectors/clients/llm_chitchat_answer_client.py b/wafl/connectors/clients/llm_chitchat_answer_client.py index b798b8e9..b589c7ac 100644 --- a/wafl/connectors/clients/llm_chitchat_answer_client.py +++ b/wafl/connectors/clients/llm_chitchat_answer_client.py @@ -12,30 +12,29 @@ def __init__(self, config): self._connector = LLMConnectorFactory.get_connector(config) self._config = config - async def get_answer(self, text: str, dialogue: str) -> str: - prompt = await self._get_answer_prompt(text, dialogue) + async def get_answer(self, text: str, dialogue: str, rules_text: str) -> str: + prompt = await self._get_answer_prompt(text, dialogue, rules_text) return await self._connector.generate(prompt) async def _get_answer_prompt( - self, text: str, dialogue: "Conversation" = None + self, text: str, dialogue: "Conversation" = None, rules_text: str = None ) -> PromptTemplate: return PromptTemplate( - system_prompt=self._get_system_prompt(text), conversation=dialogue + system_prompt=self._get_system_prompt(text, rules_text), + conversation=dialogue, ) - def _get_system_prompt(self, text): - return textwrap.dedent( - f""" - The following is a summary of a conversation. All the elements of the conversation are described briefly: - - A user is chatting with a bot. The chat is happening through a web interface. The user is typing the messages and the bot is replying. - {text.strip()} - - - - Create a plausible dialogue based on the aforementioned summary and rules. - Do not repeat yourself. Be friendly but not too servile. - Wrap any code or html you output in the with the markdown syntax for code blocks (i.e. use triple backticks ```) unless it is between tags. - - """ - ) + def _get_system_prompt(self, text, rules_text): + return f""" +A user is chatting with a bot. The chat is happening through a web interface. The user is typing the messages and the bot is replying. + +This is summary of the bot's knowledge: +{text.strip()} + +The rules that *must* be followed are: +{rules_text.strip()} + +Create a plausible dialogue based on the aforementioned summary and rules. +Do not repeat yourself. Be friendly but not too servile. +Follow the rules if present and they apply to the dialogue. Do not improvise if rules are present. 
+ """.strip() diff --git a/wafl/connectors/prompt_template.py b/wafl/connectors/prompt_template.py index ad989766..5f17cc9d 100644 --- a/wafl/connectors/prompt_template.py +++ b/wafl/connectors/prompt_template.py @@ -11,7 +11,7 @@ class PromptTemplate: def to_dict(self): return { "system_prompt": self.system_prompt, - "conversation": self.conversation.to_dict(), + "conversation": self.conversation.to_dict() if self.conversation else [], } diff --git a/wafl/connectors/remote/remote_llm_connector.py b/wafl/connectors/remote/remote_llm_connector.py index 75e55232..ae3403af 100644 --- a/wafl/connectors/remote/remote_llm_connector.py +++ b/wafl/connectors/remote/remote_llm_connector.py @@ -14,7 +14,7 @@ class RemoteLLMConnector(BaseLLMConnector): _num_prediction_tokens = 200 _cache = {} - def __init__(self, config, last_strings=None, num_replicas=3): + def __init__(self, config, last_strings=None, num_replicas=1): super().__init__(last_strings) host = config["model_host"] port = config["model_port"] diff --git a/wafl/events/conversation_events.py b/wafl/events/conversation_events.py index e00572f1..d6186d8a 100644 --- a/wafl/events/conversation_events.py +++ b/wafl/events/conversation_events.py @@ -1,6 +1,5 @@ import os import re -import traceback from wafl.events.answerer_creator import create_answerer from wafl.simple_text_processing.normalize import normalized @@ -61,7 +60,7 @@ async def _process_query(self, text: str): if ( not text_is_question and self._interface.get_utterances_list() - and self._interface.get_utterances_list()[-1].find("user:") == 0 + and self._interface.last_speaker() == "user" ): await self._interface.output("I don't know what to reply") diff --git a/wafl/interface/base_interface.py b/wafl/interface/base_interface.py index 6ebbf7b6..eed2aa8f 100644 --- a/wafl/interface/base_interface.py +++ b/wafl/interface/base_interface.py @@ -1,4 +1,5 @@ import time +from typing import List from wafl.interface.conversation import Conversation, Utterance @@ -33,7 +34,7 @@ def deactivate(self): self._is_listening = False self._choices = [] self._facts = [] - self._utterances = [] + self._utterances = Conversation() async def add_choice(self, text): self._choices.append((time.time(), text)) @@ -49,14 +50,20 @@ def get_choices_and_timestamp(self): def get_facts_and_timestamp(self): return self._facts - def get_utterances_list(self): - return [item[1] for item in self._utterances] + def get_utterances_list(self) -> List[str]: + return [ + f"{utterance.speaker}: {utterance.text}" + for utterance in self._utterances.utterances + ] + + def last_speaker(self): + return self._utterances.get_last_n(1).utterances[0].speaker def get_utterances_list_with_timestamp(self): return self._utterances def reset_history(self): - self._utterances = [] + self._utterances = Conversation() self._choices = [] self._facts = [] diff --git a/wafl/interface/conversation.py b/wafl/interface/conversation.py index 72ce8e14..fbba67b1 100644 --- a/wafl/interface/conversation.py +++ b/wafl/interface/conversation.py @@ -19,6 +19,9 @@ def __init__(self, text: str, speaker: str, timestamp: float = None): def to_dict(self): return {"text": self.text, "speaker": self.speaker, "timestamp": self.timestamp} + def __str__(self): + return f"{self.speaker}: {self.text}" + @dataclass class Conversation: @@ -38,7 +41,71 @@ def add_utterance(self, utterance: Utterance): ): return + if ( + len(self.utterances) > 0 + and utterance.speaker == self.utterances[-1].speaker + ): + self.utterances[-1].text += "\n" + utterance.text + return + 
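+        # The branch above merges consecutive utterances from the same
+        # speaker into a single newline-separated utterance, so the stored
+        # conversation always alternates between "user" and "bot" entries.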
self.utterances.append(utterance) + self.utterances = sorted(self.utterances, key=lambda x: x.timestamp) + + def insert_utterance(self, new_utterance: Utterance): + """ + Insert a new utterance into the conversation at the timestamp defined in the utterance. + :param new_utterance: + """ + if self.utterances is None: + self.utterances = [] + + new_utterances = [] + already_inserted = False + for utterance in self.utterances: + if ( + not already_inserted + and utterance.speaker == new_utterance.speaker + and utterance.timestamp == new_utterance.timestamp + ): + new_utterances.append( + Utterance( + new_utterance.text, + new_utterance.speaker, + new_utterance.timestamp, + ) + ) + already_inserted = True + + new_utterances.append( + Utterance(utterance.text, utterance.speaker, utterance.timestamp) + ) + + self.utterances = new_utterances + + def get_last_n(self, n: int) -> "Conversation": + return Conversation(self.utterances[-n:]) if self.utterances else Conversation() + + def get_last_speaker_utterances(self, speaker: str, n: int) -> List[str]: + if not self.utterances: + return [] + + return [ + utterance.text + for utterance in self.utterances + if utterance.speaker == speaker + ][-n:] + + def get_first_timestamp(self) -> float: + return self.utterances[0].timestamp if self.utterances else None + + def get_last_timestamp(self) -> float: + return self.utterances[-1].timestamp if self.utterances else None def to_dict(self): return [utterance.to_dict() for utterance in self.utterances] + + def get_utterances_list(self) -> List[Utterance]: + return self.utterances + + def __len__(self): + return len(self.utterances) diff --git a/wafl/interface/dummy_interface.py b/wafl/interface/dummy_interface.py index e651b2e1..d903677b 100644 --- a/wafl/interface/dummy_interface.py +++ b/wafl/interface/dummy_interface.py @@ -1,6 +1,7 @@ import re import time +from wafl.interface.conversation import Utterance from wafl.simple_text_processing.deixis import from_bot_to_user from wafl.interface.base_interface import BaseInterface from wafl.interface.utils import not_good_enough @@ -27,7 +28,7 @@ async def output(self, text: str, silent: bool = False): if not silent: self._dialogue += "bot: " + text + "\n" - self._utterances.append((time.time(), f"bot: {from_bot_to_user(text)}")) + self._insert_utterance(speaker="bot", text=text) self.bot_has_spoken(True) async def input(self) -> str: @@ -42,7 +43,7 @@ async def input(self) -> str: self._dialogue += "user: " + text + "\n" utterance = text - self._utterances.append((time.time(), f"user: {utterance}")) + self._insert_utterance(speaker="user", text=text) return utterance def bot_has_spoken(self, to_set: bool = None): diff --git a/wafl/interface/queue_interface.py b/wafl/interface/queue_interface.py index cf14c9ca..15c9b1a4 100644 --- a/wafl/interface/queue_interface.py +++ b/wafl/interface/queue_interface.py @@ -16,7 +16,7 @@ async def output(self, text: str, silent: bool = False): return self.output_queue.append({"text": text, "silent": False}) - self._insert_utterance("bot", text) + self._insert_utterance(speaker="bot", text=text) self.bot_has_spoken(True) async def input(self) -> str: @@ -24,7 +24,7 @@ async def input(self) -> str: await asyncio.sleep(0.1) text = self.input_queue.pop(0) - self._insert_utterance("user", text) + self._insert_utterance(speaker="user", text=text) return text async def insert_input(self, text: str): diff --git a/wafl/interface/voice_interface.py b/wafl/interface/voice_interface.py index b3a1c2f1..29190d4c 100644 --- 
a/wafl/interface/voice_interface.py +++ b/wafl/interface/voice_interface.py @@ -53,7 +53,7 @@ async def output(self, text: str, silent: bool = False): self._listener.activate() text = text - self._insert_utterance("bot", text) + self._insert_utterance(speaker="bot", text=text) print(COLOR_START + "bot> " + text + COLOR_END) await self._speaker.speak(text) self.bot_has_spoken(True) @@ -76,7 +76,7 @@ async def input(self) -> str: print(COLOR_START + "user> " + text + COLOR_END) utterance = remove_text_between_brackets(text) if utterance.strip(): - self._insert_utterance("user", text) + self._insert_utterance(speaker="user", text=text) return text diff --git a/wafl/scheduler/messages_creator.py b/wafl/scheduler/messages_creator.py index ec0d85cb..cf3d65ac 100644 --- a/wafl/scheduler/messages_creator.py +++ b/wafl/scheduler/messages_creator.py @@ -20,22 +20,9 @@ async def get_messages_window(self): return conversation - async def _get_dialogue(self): - dialogue_items = self._interface.get_utterances_list_with_timestamp() - dialogue = [] - for index, item in enumerate(dialogue_items): - dialogue.append( - ( - item[0], - get_html_from_dialogue_item( - item[1], - ), - ) - ) - - dialogue_items = dialogue - dialogue_items = sorted(dialogue_items, key=lambda x: x[0])[::-1] - dialogue_items = [item[1] for item in dialogue_items] + async def _get_dialogue(self): #### make this work with the new conversation class + dialogue_items = self._interface.get_utterances_list() + dialogue_items = [get_html_from_dialogue_item(item) for item in dialogue_items[::-1]] conversation = ( "
" ) diff --git a/wafl/templates/functions.py b/wafl/templates/functions.py index d03df431..c806a872 100644 --- a/wafl/templates/functions.py +++ b/wafl/templates/functions.py @@ -106,7 +106,7 @@ def add_to_shopping_list(list_of_items_to_add): return "Item added" -def remove_shopping_list(list_of_items_to_remove): +def remove_from_shopping_list(list_of_items_to_remove): db = json.load(open(_db_filename)) for item in list_of_items_to_remove: if item in db["shopping_list"]: From 3be1c67d952eeab7d5d34103bc23a42d2c0c8b1d Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Sat, 25 May 2024 15:31:05 +0100 Subject: [PATCH 10/16] fixed the deactivation after one utterance: computer what time is it no activates, answers, deactivates --- wafl/interface/base_interface.py | 5 +++++ wafl/interface/conversation.py | 4 ++++ wafl/interface/list_interface.py | 3 +++ wafl/scheduler/messages_creator.py | 2 +- 4 files changed, 13 insertions(+), 1 deletion(-) diff --git a/wafl/interface/base_interface.py b/wafl/interface/base_interface.py index eed2aa8f..dc73f964 100644 --- a/wafl/interface/base_interface.py +++ b/wafl/interface/base_interface.py @@ -1,3 +1,4 @@ +import re import time from typing import List @@ -51,6 +52,9 @@ def get_facts_and_timestamp(self): return self._facts def get_utterances_list(self) -> List[str]: + if not self._utterances: + return [] + return [ f"{utterance.speaker}: {utterance.text}" for utterance in self._utterances.utterances @@ -77,6 +81,7 @@ def _decorate_reply(self, text: str) -> str: return self._decorator.extract(text, self._utterances) def _insert_utterance(self, speaker, text: str): + text = re.sub(r"\[.*?\]", "", text) self._utterances.add_utterance( Utterance(text=text, speaker=speaker, timestamp=time.time()) ) diff --git a/wafl/interface/conversation.py b/wafl/interface/conversation.py index fbba67b1..e3a0112e 100644 --- a/wafl/interface/conversation.py +++ b/wafl/interface/conversation.py @@ -105,7 +105,11 @@ def to_dict(self): return [utterance.to_dict() for utterance in self.utterances] def get_utterances_list(self) -> List[Utterance]: + if not self.utterances: + return [] return self.utterances def __len__(self): + if not self.utterances: + return 0 return len(self.utterances) diff --git a/wafl/interface/list_interface.py b/wafl/interface/list_interface.py index c7416d2e..6febcff9 100644 --- a/wafl/interface/list_interface.py +++ b/wafl/interface/list_interface.py @@ -30,6 +30,9 @@ async def insert_input(self, text: str): ) def bot_has_spoken(self, to_set: bool = None): + if to_set == None: + return all(interface.bot_has_spoken() for interface in self._interfaces_list) + for interface in self._interfaces_list: interface.bot_has_spoken(to_set) diff --git a/wafl/scheduler/messages_creator.py b/wafl/scheduler/messages_creator.py index cf3d65ac..46b6c041 100644 --- a/wafl/scheduler/messages_creator.py +++ b/wafl/scheduler/messages_creator.py @@ -20,7 +20,7 @@ async def get_messages_window(self): return conversation - async def _get_dialogue(self): #### make this work with the new conversation class + async def _get_dialogue(self): dialogue_items = self._interface.get_utterances_list() dialogue_items = [get_html_from_dialogue_item(item) for item in dialogue_items[::-1]] conversation = ( From 09fe52df7f48551942221b5c2ce5b56445c5e4e2 Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Sun, 26 May 2024 13:32:00 +0100 Subject: [PATCH 11/16] update rule maker with more complex rule templates. 
The tag is now part of the last clause --- wafl/answerer/rule_maker.py | 6 +----- wafl/rules.py | 10 ++++++---- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/wafl/answerer/rule_maker.py b/wafl/answerer/rule_maker.py index f9775d21..91449270 100644 --- a/wafl/answerer/rule_maker.py +++ b/wafl/answerer/rule_maker.py @@ -23,11 +23,7 @@ async def create_from_query(self, query): rules = rules[: self._max_num_rules] rules_texts = [] for rule in rules: - rules_text = rule.get_string_using_template( - "- If {effect} go through the following points:" - ) - rules_text += f'{rule.indent_str}- After you completed all the steps output "{self._delete_current_rule}" and continue the conversation.\n' - + rules_text = rule.get_string_using_template('- {effect}:\n{clauses} and then output "{self._delete_current_rule}".\n') rules_texts.append(rules_text) await self._interface.add_fact(f"The bot remembers the rule:\n{rules_text}") diff --git a/wafl/rules.py b/wafl/rules.py index fc4837cf..c04a834c 100644 --- a/wafl/rules.py +++ b/wafl/rules.py @@ -13,14 +13,16 @@ def toJSON(self): return str(self) def get_string_using_template(self, effect_template: str) -> str: - rule_str = effect_template.replace("{effect}", self.effect.text) + "\n" - return self._add_clauses(rule_str) + rule_str = effect_template.replace("{effect}", self.effect.text) + rule_str = rule_str.replace("{clauses}", self._get_clauses()) + return rule_str def __str__(self): rule_str = self.effect.text + "\n" - return self._add_clauses(rule_str) + return rule_str + self._get_clauses() - def _add_clauses(self, rule_str: str) -> str: + def _get_clauses(self) -> str: + rule_str = "" for cause in self.causes: try: rule_str += self._recursively_add_clauses(cause) From f3159d971e72a06561b5985d391a73fb8785353a Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Mon, 27 May 2024 11:14:37 +0100 Subject: [PATCH 12/16] iterating conversation with remember tags --- wafl/answerer/dialogue_answerer.py | 46 +++++++++++++++++++++--------- wafl/answerer/rule_maker.py | 2 +- wafl/events/conversation_events.py | 2 +- wafl/rules.py | 2 +- 4 files changed, 35 insertions(+), 17 deletions(-) diff --git a/wafl/answerer/dialogue_answerer.py b/wafl/answerer/dialogue_answerer.py index a0231361..112c2602 100644 --- a/wafl/answerer/dialogue_answerer.py +++ b/wafl/answerer/dialogue_answerer.py @@ -15,7 +15,7 @@ class DialogueAnswerer(BaseAnswerer): def __init__(self, config, knowledge, interface, code_path, logger): self._delete_current_rule = "" - self._bridge = LLMChitChatAnswerClient(config) + self._client = LLMChitChatAnswerClient(config) self._knowledge = knowledge self._logger = logger self._interface = interface @@ -65,8 +65,9 @@ async def answer(self, query_text): conversational_timestamp=conversational_timestamp, ) + final_answer_text = "" for _ in range(self._max_predictions): - original_answer_text = await self._bridge.get_answer( + original_answer_text = await self._client.get_answer( text=facts, rules_text=rules_text, dialogue=conversation, @@ -93,27 +94,36 @@ async def answer(self, query_text): self._prior_rules = None conversation.add_utterance( Utterance( - original_answer_text, + answer_text, "bot", ) ) continue + final_answer_text += answer_text if not memories: break facts += "\n" + "\n".join(memories) + conversation.add_utterance( Utterance( - original_answer_text, + answer_text, "bot", ) ) + conversation.add_utterance( + Utterance( + "Continue", + "user", + ) + ) + if self._logger: - self._logger.write(f"Answer within dialogue: The answer is 
{answer_text}") + self._logger.write(f"Answer within dialogue: The answer is {final_answer_text}") - return Answer.create_from_text(answer_text) + return Answer.create_from_text(final_answer_text) async def _get_relevant_facts( self, query, has_prior_rules, conversational_timestamp @@ -189,21 +199,29 @@ async def _substitute_results_in_answer(self, answer_text): async def _substitute_memory_in_answer_and_get_memories_if_present( self, answer_text ): - matches = re.finditer(r"(.*?)", answer_text, re.DOTALL) + matches = re.finditer( + r"(.*?)|(.*?)$", + answer_text, + re.DOTALL | re.MULTILINE, + ) memories = [] for match in matches: - to_execute = match.group(1) - answer_text = answer_text.replace(match.group(0), "") - memories.append(to_execute) + to_substitute = match.group(1) + if not to_substitute: + continue + answer_text = answer_text.replace(match.group(0), "[Output in memory]") + memories.append(to_substitute) matches = re.finditer( - r"(.*?\))$", answer_text, re.DOTALL | re.MULTILINE + r"(.*?)$", answer_text, re.DOTALL | re.MULTILINE ) memories = [] for match in matches: - to_execute = match.group(1) - answer_text = answer_text.replace(match.group(0), "") - memories.append(to_execute) + to_substitute = match.group(1) + if not to_substitute: + continue + answer_text = answer_text.replace(match.group(0), "[Output in memory]") + memories.append(to_substitute) return answer_text, memories diff --git a/wafl/answerer/rule_maker.py b/wafl/answerer/rule_maker.py index 91449270..57193f17 100644 --- a/wafl/answerer/rule_maker.py +++ b/wafl/answerer/rule_maker.py @@ -23,7 +23,7 @@ async def create_from_query(self, query): rules = rules[: self._max_num_rules] rules_texts = [] for rule in rules: - rules_text = rule.get_string_using_template('- {effect}:\n{clauses} and then output "{self._delete_current_rule}".\n') + rules_text = rule.get_string_using_template('- {effect}:\n{clauses} and then output ' + f'"{self._delete_current_rule}".\n') rules_texts.append(rules_text) await self._interface.add_fact(f"The bot remembers the rule:\n{rules_text}") diff --git a/wafl/events/conversation_events.py b/wafl/events/conversation_events.py index d6186d8a..d18e9a9b 100644 --- a/wafl/events/conversation_events.py +++ b/wafl/events/conversation_events.py @@ -21,7 +21,7 @@ def __init__( self._config = config self._knowledge = load_knowledge(config, logger) self._answerer = create_answerer(config, self._knowledge, interface, logger) - self._answerer._bridge._connector._cache = {} + self._answerer._client._connector._cache = {} self._interface = interface self._logger = logger self._is_computing = False diff --git a/wafl/rules.py b/wafl/rules.py index c04a834c..738e0bbf 100644 --- a/wafl/rules.py +++ b/wafl/rules.py @@ -33,7 +33,7 @@ def _get_clauses(self) -> str: print() raise e - return rule_str + return rule_str[:-1] def _recursively_add_clauses(self, query: str, depth: int = 1) -> str: indentation = self.indent_str * depth From 1e2227061128b5bb4baac3d42556a370e460ee5a Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Mon, 27 May 2024 12:05:42 +0100 Subject: [PATCH 13/16] the conversation is updated to deal with remember rules. 
The tag is changed into [delete_rule] --- wafl/answerer/dialogue_answerer.py | 13 +++++-------- wafl/answerer/rule_maker.py | 2 +- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/wafl/answerer/dialogue_answerer.py b/wafl/answerer/dialogue_answerer.py index 112c2602..2a453954 100644 --- a/wafl/answerer/dialogue_answerer.py +++ b/wafl/answerer/dialogue_answerer.py @@ -14,7 +14,7 @@ class DialogueAnswerer(BaseAnswerer): def __init__(self, config, knowledge, interface, code_path, logger): - self._delete_current_rule = "" + self._delete_current_rule = "[delete_rule]" self._client = LLMChitChatAnswerClient(config) self._knowledge = knowledge self._logger = logger @@ -92,13 +92,8 @@ async def answer(self, query_text): if self._delete_current_rule in answer_text: self._prior_rules = None - conversation.add_utterance( - Utterance( - answer_text, - "bot", - ) - ) - continue + final_answer_text += answer_text + break final_answer_text += answer_text if not memories: @@ -212,6 +207,7 @@ async def _substitute_memory_in_answer_and_get_memories_if_present( answer_text = answer_text.replace(match.group(0), "[Output in memory]") memories.append(to_substitute) + answer_text = answer_text.replace("
", "\n") matches = re.finditer( r"(.*?)$", answer_text, re.DOTALL | re.MULTILINE ) @@ -223,6 +219,7 @@ async def _substitute_memory_in_answer_and_get_memories_if_present( answer_text = answer_text.replace(match.group(0), "[Output in memory]") memories.append(to_substitute) + return answer_text, memories async def _run_code(self, to_execute): diff --git a/wafl/answerer/rule_maker.py b/wafl/answerer/rule_maker.py index 57193f17..223760bb 100644 --- a/wafl/answerer/rule_maker.py +++ b/wafl/answerer/rule_maker.py @@ -23,7 +23,7 @@ async def create_from_query(self, query): rules = rules[: self._max_num_rules] rules_texts = [] for rule in rules: - rules_text = rule.get_string_using_template('- {effect}:\n{clauses} and then output ' + f'"{self._delete_current_rule}".\n') + rules_text = rule.get_string_using_template('- {effect}:\n{clauses}\nAfter you completed all the steps output ' + f'"{self._delete_current_rule}".\n') rules_texts.append(rules_text) await self._interface.add_fact(f"The bot remembers the rule:\n{rules_text}") From 1e574297410f80d9f4d4a21e37c82175aafe9fc3 Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Mon, 27 May 2024 14:54:09 +0100 Subject: [PATCH 14/16] only retrieving one rule at a time --- tests/test_prompts.py | 20 ++++++------------ wafl/answerer/dialogue_answerer.py | 21 +++++++++---------- wafl/answerer/entailer.py | 4 +--- wafl/answerer/rule_maker.py | 13 +++++++++--- .../clients/llm_chitchat_answer_client.py | 5 +++-- wafl/interface/list_interface.py | 4 +++- wafl/scheduler/messages_creator.py | 4 +++- 7 files changed, 36 insertions(+), 35 deletions(-) diff --git a/tests/test_prompts.py b/tests/test_prompts.py index 55928c17..f5c7b826 100644 --- a/tests/test_prompts.py +++ b/tests/test_prompts.py @@ -18,12 +18,8 @@ def test_utterance(self): ) def test_conversation(self): - utterance1 = Utterance( - text="Hello", speaker="user", timestamp=2 - ) - utterance2 = Utterance( - text="Hi", speaker="bot", timestamp=1 - ) + utterance1 = Utterance(text="Hello", speaker="user", timestamp=2) + utterance2 = Utterance(text="Hi", speaker="bot", timestamp=1) conversation = Conversation(utterances=[utterance1, utterance2]) self.assertEqual( conversation.to_dict(), @@ -38,16 +34,12 @@ def test_conversation(self): "speaker": "bot", "timestamp": 1, }, - ] + ], ) def test_prompt(self): - utterance1 = Utterance( - text="Hello", speaker="user", timestamp=2 - ) - utterance2 = Utterance( - text="Hi", speaker="bot", timestamp=1 - ) + utterance1 = Utterance(text="Hello", speaker="user", timestamp=2) + utterance2 = Utterance(text="Hi", speaker="bot", timestamp=1) conversation = Conversation(utterances=[utterance1, utterance2]) prompt = PromptCreator.create(system_prompt="Hello", conversation=conversation) self.assertEqual( @@ -65,6 +57,6 @@ def test_prompt(self): "speaker": "bot", "timestamp": 1, }, - ] + ], }, ) diff --git a/wafl/answerer/dialogue_answerer.py b/wafl/answerer/dialogue_answerer.py index 2a453954..0ee73def 100644 --- a/wafl/answerer/dialogue_answerer.py +++ b/wafl/answerer/dialogue_answerer.py @@ -24,13 +24,13 @@ def __init__(self, config, knowledge, interface, code_path, logger): self._max_num_past_utterances_for_rules = 0 self._prior_facts_with_timestamp = [] self._init_python_module(code_path.replace(".py", "")) - self._prior_rules = None + self._prior_rules = [] self._max_predictions = 3 self._rule_creator = RuleMaker( knowledge, config, interface, - max_num_rules=2, + max_num_rules=1, delete_current_rule=self._delete_current_rule, ) @@ -91,7 +91,7 @@ async def answer(self, 
query_text): continue if self._delete_current_rule in answer_text: - self._prior_rules = None + self._prior_rules = [] final_answer_text += answer_text break @@ -114,9 +114,10 @@ async def answer(self, query_text): ) ) - if self._logger: - self._logger.write(f"Answer within dialogue: The answer is {final_answer_text}") + self._logger.write( + f"Answer within dialogue: The answer is {final_answer_text}" + ) return Answer.create_from_text(final_answer_text) @@ -156,11 +157,10 @@ async def _get_relevant_facts( async def _get_relevant_rules(self, query): rules = await self._rule_creator.create_from_query(query) - if not rules and self._prior_rules: - rules = self._prior_rules - self._prior_rules = rules - return rules - + for rule in rules: + if rule not in self._prior_rules: + self._prior_rules.append(rule) + return self._prior_rules def _init_python_module(self, module_name): self._module = import_module(module_name) @@ -219,7 +219,6 @@ async def _substitute_memory_in_answer_and_get_memories_if_present( answer_text = answer_text.replace(match.group(0), "[Output in memory]") memories.append(to_substitute) - return answer_text, memories async def _run_code(self, to_execute): diff --git a/wafl/answerer/entailer.py b/wafl/answerer/entailer.py index 97bd4297..54e4e3e2 100644 --- a/wafl/answerer/entailer.py +++ b/wafl/answerer/entailer.py @@ -38,6 +38,4 @@ def _get_dialogue_prompt(self, dialogue, lhs, rhs): 2) {rhs.lower()} Please answer "yes" if the two sentences are similar or "no" if not: """.strip() - return Conversation( - [Utterance(speaker="user", text=text)] - ) + return Conversation([Utterance(speaker="user", text=text)]) diff --git a/wafl/answerer/rule_maker.py b/wafl/answerer/rule_maker.py index 223760bb..d181b6a5 100644 --- a/wafl/answerer/rule_maker.py +++ b/wafl/answerer/rule_maker.py @@ -1,3 +1,6 @@ +from typing import List + + class RuleMaker: def __init__( self, @@ -18,13 +21,17 @@ def __init__( else: self._max_indentation = config.get_value("max_recursion") - async def create_from_query(self, query): + async def create_from_query(self, query: "Query") -> List[str]: rules = await self._knowledge.ask_for_rule_backward(query, threshold=0.92) rules = rules[: self._max_num_rules] rules_texts = [] for rule in rules: - rules_text = rule.get_string_using_template('- {effect}:\n{clauses}\nAfter you completed all the steps output ' + f'"{self._delete_current_rule}".\n') + rules_text = rule.get_string_using_template( + "- {effect}:\n{clauses}\n" + + rule.indent_str + + f'- After you completed all the steps output "{self._delete_current_rule}".\n' + ) rules_texts.append(rules_text) await self._interface.add_fact(f"The bot remembers the rule:\n{rules_text}") - return "\n".join(rules_texts) + return rules_texts diff --git a/wafl/connectors/clients/llm_chitchat_answer_client.py b/wafl/connectors/clients/llm_chitchat_answer_client.py index b589c7ac..2302dc31 100644 --- a/wafl/connectors/clients/llm_chitchat_answer_client.py +++ b/wafl/connectors/clients/llm_chitchat_answer_client.py @@ -1,5 +1,6 @@ import os import textwrap +from typing import List from wafl.connectors.factories.llm_connector_factory import LLMConnectorFactory from wafl.connectors.prompt_template import PromptTemplate @@ -12,8 +13,8 @@ def __init__(self, config): self._connector = LLMConnectorFactory.get_connector(config) self._config = config - async def get_answer(self, text: str, dialogue: str, rules_text: str) -> str: - prompt = await self._get_answer_prompt(text, dialogue, rules_text) + async def get_answer(self, text: str, 
dialogue: str, rules_text: List[str]) -> str: + prompt = await self._get_answer_prompt(text, dialogue, "\n".join(rules_text)) return await self._connector.generate(prompt) async def _get_answer_prompt( diff --git a/wafl/interface/list_interface.py b/wafl/interface/list_interface.py index 6febcff9..27a24bd0 100644 --- a/wafl/interface/list_interface.py +++ b/wafl/interface/list_interface.py @@ -31,7 +31,9 @@ async def insert_input(self, text: str): def bot_has_spoken(self, to_set: bool = None): if to_set == None: - return all(interface.bot_has_spoken() for interface in self._interfaces_list) + return all( + interface.bot_has_spoken() for interface in self._interfaces_list + ) for interface in self._interfaces_list: interface.bot_has_spoken(to_set) diff --git a/wafl/scheduler/messages_creator.py b/wafl/scheduler/messages_creator.py index 46b6c041..04e1a6c2 100644 --- a/wafl/scheduler/messages_creator.py +++ b/wafl/scheduler/messages_creator.py @@ -22,7 +22,9 @@ async def get_messages_window(self): async def _get_dialogue(self): dialogue_items = self._interface.get_utterances_list() - dialogue_items = [get_html_from_dialogue_item(item) for item in dialogue_items[::-1]] + dialogue_items = [ + get_html_from_dialogue_item(item) for item in dialogue_items[::-1] + ] conversation = ( "
" ) From b88f4d242cbe4b84137dddd55b40d24be42fa836 Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Mon, 27 May 2024 14:56:46 +0100 Subject: [PATCH 15/16] updated template --- documentation/source/running_WAFL.rst | 3 ++- wafl/templates/rules.yaml | 35 +++++++++++++++++---------- 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/documentation/source/running_WAFL.rst b/documentation/source/running_WAFL.rst index c8e0c10b..478d58a7 100644 --- a/documentation/source/running_WAFL.rst +++ b/documentation/source/running_WAFL.rst @@ -5,7 +5,8 @@ There are four modes in which to run the system $ wafl run ---------- -Starts all the available interfaces of the chatbot at the same time. +Starts the available interfaces of the chatbot at the same time. +This is equivalent to running run-audio and run-server in parallel (see below). $ wafl run-audio diff --git a/wafl/templates/rules.yaml b/wafl/templates/rules.yaml index 78938a1d..fed65b08 100644 --- a/wafl/templates/rules.yaml +++ b/wafl/templates/rules.yaml @@ -24,28 +24,37 @@ rules: - the user wants to know the weather tomorrow: - output "check_tomorrow_weather()". + - the user wants the news from the guardian website: + - output exactly " The guardian headlines are get_guardian_headlines()". + - use the headlines to create a list of bullet points separated by new lines "\n" + - output the bullet points to the user in a summarised form + - the user wants to summarise a website: - - you'll need the website url to summarise - - output exactly " The website content is get_website('WEBSITE_URL') ". - - summarise the website content given what you remember - - output the summary + - Ask the website url to summarise + - output exactly " The website content is get_website('WEBSITE_URL') " + - output a summary of the website content - the user wants to know what is in the shopping list: - output "get_shopping_list()". - the user wants to add something to the shopping list: - - The task here is to add the item to the shopping list using a python function - - example "add milk to the shopping list" should output "add_to_shopping_list(['milk'])" - - output "add_to_shopping_list(ITEMS_TO_ADD)". + - The task here is to add the item to the shopping list using a python function + - example "add milk to the shopping list" should output "add_to_shopping_list(['milk'])" + - output "add_to_shopping_list(ITEMS_TO_ADD)". - the user wants to remove something to the shopping list: - - The task here is to remove the item from the shopping list using a python function - - example "remove milk from the shopping list" should output "remove_from_shopping_list(['milk'])" - - output "remove_from_shopping_list(ITEMS_TO_REMOVE)". + - The task here is to remove the item from the shopping list using a python function + - example "remove milk from the shopping list" should output "remove_from_shopping_list(['milk'])" + - output "remove_from_shopping_list(ITEMS_TO_REMOVE)". + + - the user asks to write something in a file: + - unless specified, the file is called "file.txt" + - The text to write can contain new lines. They can be added to the function using "\n" + - output exactly "write_to_file('file.txt', 'TEXT_TO_WRITE')". 
   - the user asks something about cities, capitals, countries, buildings, famous people, bars, restaurants, rivers, mountains, lakes, seas, oceans, planets, stars, galaxies:
-    - say that you are just improvising the answer
-    - say what you think answer the question
+      - say that you are just improvising the answer
+      - say what you think answers the question
 
   - the user thanks the bot:
     - The intention of the user is to close the conversation
@@ -53,4 +62,4 @@
 
   - the user says "end this conversation":
     - The intention of the user is to close the conversation
-    - You must answer the user by writing "close_conversation()"
\ No newline at end of file
+    - You must answer the user by writing "close_conversation()"

From b30179be5bb311daeffa9a8270a3bb17e9c37123 Mon Sep 17 00:00:00 2001
From: Alberto Cetoli
Date: Mon, 27 May 2024 14:58:54 +0100
Subject: [PATCH 16/16] changed variables for version

---
 README.md                      | 2 +-
 documentation/source/index.rst | 2 +-
 wafl/variables.py              | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 8b95a194..fcf025a2 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# WAFL 0.0.80 [![Tests](https://github.com/fractalego/wafl/actions/workflows/development-tests1.yml/badge.svg)](https://github.com/fractalego/wafl/actions/workflows/development-tests1.yml)[![Docs](https://readthedocs.org/projects/wafl/badge/?version=latest)](https://wafl.readthedocs.io/en/latest/)
+# WAFL 0.0.90 [![Tests](https://github.com/fractalego/wafl/actions/workflows/development-tests1.yml/badge.svg)](https://github.com/fractalego/wafl/actions/workflows/development-tests1.yml)[![Docs](https://readthedocs.org/projects/wafl/badge/?version=latest)](https://wafl.readthedocs.io/en/latest/)
 
 Introduction
 ============
diff --git a/documentation/source/index.rst b/documentation/source/index.rst
index 9f32cc73..30c4f872 100644
--- a/documentation/source/index.rst
+++ b/documentation/source/index.rst
@@ -3,7 +3,7 @@
    You can adapt this file completely to your liking, but it should at least
    contain the root `toctree` directive.
 
-Welcome to WAFL's 0.0.80 documentation!
+Welcome to WAFL's 0.0.90 documentation!
 =======================================
 
 .. toctree::
diff --git a/wafl/variables.py b/wafl/variables.py
index 0eee6ad9..27f3aa5c 100644
--- a/wafl/variables.py
+++ b/wafl/variables.py
@@ -1,9 +1,9 @@
 def get_variables():
     return {
-        "version": "0.0.84",
+        "version": "0.0.90",
     }
 
 
 def is_supported(wafl_llm_version):
-    supported_versions = ["0.0.82", "0.0.83"]
+    supported_versions = ["0.0.90"]
     return wafl_llm_version in supported_versions
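For a quick sanity check of the version gate above, a minimal sketch (assuming the `wafl` package from this series is importable; the checked values mirror the diff):

```python
from wafl.variables import get_variables, is_supported

# The client version shipped with this series.
assert get_variables()["version"] == "0.0.90"

# Only a wafl-llm backend reporting "0.0.90" is accepted now;
# the previously supported "0.0.82" and "0.0.83" are rejected.
assert is_supported("0.0.90")
assert not is_supported("0.0.83")
```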