From 99f61e7710ba7210824ffc1f21313ec288432b7d Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Sat, 23 Dec 2023 18:41:52 +0000 Subject: [PATCH 01/11] rudimental actions implemented --- setup.py | 1 - todo.txt | 27 ++++++++++++++----------- wafl/command_line.py | 13 +++++++++++- wafl/filter/base_filter.py | 3 --- wafl/interface/dummy_interface.py | 26 +++++++++++++----------- wafl/interface/queue_interface.py | 8 +------- wafl/interface/voice_interface.py | 8 +------- wafl/knowledge/single_file_knowledge.py | 4 ---- wafl/knowledge/utils.py | 14 ------------- wafl/run.py | 23 +++++++++++++++++++++ wafl/templates/actions.yaml | 3 +++ 11 files changed, 69 insertions(+), 61 deletions(-) delete mode 100644 wafl/filter/base_filter.py create mode 100644 wafl/templates/actions.yaml diff --git a/setup.py b/setup.py index e4b1e407..ba201367 100644 --- a/setup.py +++ b/setup.py @@ -22,7 +22,6 @@ "wafl.connectors.remote", "wafl.events", "wafl.extractors", - "wafl.filter", "wafl.inference", "wafl.interface", "wafl.knowledge", diff --git a/todo.txt b/todo.txt index 9808b652..3d25c2ff 100644 --- a/todo.txt +++ b/todo.txt @@ -1,11 +1,14 @@ ### TODO +* create actions from command line #### +* clean single_file_knowledge: it still divides facts, question, and incomplete for rule retrieval. + Use just one retriever and threshold for all -* push docker image to docker hub -* update all to the vast.ai -* write new docs -* new version on github! -* make it easy to run the llm on the server (something more than docker perhaps)? +/* push docker image to docker hub +/* update all to the vast.ai +/* write new docs +/* new version on github! +/* make it easy to run the llm on the server (something more than docker perhaps)? /* re-train the whisper model using the distilled version /* make rules reloadable @@ -15,16 +18,16 @@ /* update tests -* lots of duplicates in facts! Avoid that - * use timestamp for facts (or an index in terms of conversation item) - * select only most n recent timestamps - * do not add facts that are already in the list (before cluster_facts) +/* lots of duplicates in facts! 
Avoid that +/ * use timestamp for facts (or an index in terms of conversation item) +/ * select only most n recent timestamps +/ * do not add facts that are already in the list (before cluster_facts) -* redeploy locally and on the server -* new version on github +/* redeploy locally and on the server +/* new version on github -* add rules for +/* add rules for / shopping lists trains and music diff --git a/wafl/command_line.py b/wafl/command_line.py index 87b81ada..aeac726e 100644 --- a/wafl/command_line.py +++ b/wafl/command_line.py @@ -7,7 +7,7 @@ run_from_command_line, run_testcases, print_incipit, - download_models, + download_models, run_action, ) from wafl.runners.run_from_audio import run_from_audio from wafl.runners.run_web_interface import run_app @@ -21,6 +21,7 @@ def print_help(): print("> wafl run-audio: Run a voice-powered version of the chatbot") print("> wafl run-server: Run a webserver version of the chatbot") print("> wafl run-tests: Run the tests in testcases.txt") + print("> wafl run-action : Run the action from actions.yaml") print() @@ -56,6 +57,16 @@ def process_cli(): run_testcases() remove_preprocessed("/") + elif command == "run-action": + if len(arguments) > 2: + action_name = arguments[2] + + else: + print("Please provide the action name as the second argument.") + return + + run_action(action_name) + elif command == "help": print_help() diff --git a/wafl/filter/base_filter.py b/wafl/filter/base_filter.py deleted file mode 100644 index 732dab10..00000000 --- a/wafl/filter/base_filter.py +++ /dev/null @@ -1,3 +0,0 @@ -class BaseAnswerFilter: - async def filter(self, dialogue_list, query_text) -> str: - raise NotImplementedError() diff --git a/wafl/interface/dummy_interface.py b/wafl/interface/dummy_interface.py index 1a8489bc..3a104957 100644 --- a/wafl/interface/dummy_interface.py +++ b/wafl/interface/dummy_interface.py @@ -7,30 +7,32 @@ class DummyInterface(BaseInterface): - def __init__(self, to_utter=None, output_filter=None): + def __init__(self, to_utter=None, print_utterances=False): super().__init__() self._to_utter = to_utter self._bot_has_spoken = False self._dialogue = "" - self._output_filter = output_filter + self._print_utterances = print_utterances async def output(self, text: str, silent: bool = False): - if silent: - print(text) - return + if self._print_utterances: + if silent: + print(text) - if self._output_filter: - text = await self._output_filter.filter( - self.get_utterances_list_with_timestamp(), text - ) + else: + print("bot> " + text) - self._dialogue += "bot: " + text + "\n" - self._utterances.append((time.time(), f"bot: {from_bot_to_user(text)}")) - self.bot_has_spoken(True) + if not silent: + self._dialogue += "bot: " + text + "\n" + self._utterances.append((time.time(), f"bot: {from_bot_to_user(text)}")) + self.bot_has_spoken(True) async def input(self) -> str: text = self._to_utter.pop(0).strip() text = self.__remove_activation_word_and_normalize(text) + if self._print_utterances: + print("user> " + text) + while self._is_listening and not_good_enough(text): await self.output("I did not quite understand that") text = self._to_utter.pop(0) diff --git a/wafl/interface/queue_interface.py b/wafl/interface/queue_interface.py index 860e24af..08fdc247 100644 --- a/wafl/interface/queue_interface.py +++ b/wafl/interface/queue_interface.py @@ -5,23 +5,17 @@ class QueueInterface(BaseInterface): - def __init__(self, output_filter=None): + def __init__(self): super().__init__() self._bot_has_spoken = False self.input_queue = [] self.output_queue 
= [] - self._output_filter = output_filter async def output(self, text: str, silent: bool = False): if silent: self.output_queue.append({"text": text, "silent": True}) return - if self._output_filter: - text = await self._output_filter.filter( - self.get_utterances_list_with_timestamp(), text - ) - utterance = text self.output_queue.append({"text": utterance, "silent": False}) self._utterances.append((time.time(), f"bot: {text}")) diff --git a/wafl/interface/voice_interface.py b/wafl/interface/voice_interface.py index ce9d149c..46e53999 100644 --- a/wafl/interface/voice_interface.py +++ b/wafl/interface/voice_interface.py @@ -19,7 +19,7 @@ class VoiceInterface(BaseInterface): - def __init__(self, config, output_filter=None): + def __init__(self, config): super().__init__() self._sound_speaker = SoundFileSpeaker() self._activation_sound_filename = self.__get_activation_sound_from_config( @@ -41,7 +41,6 @@ def __init__(self, config, output_filter=None): ) self._bot_has_spoken = False self._utterances = [] - self._output_filter = output_filter async def add_hotwords_from_knowledge( self, knowledge: "Knowledge", max_num_words: int = 100, count_threshold: int = 5 @@ -65,11 +64,6 @@ async def output(self, text: str, silent: bool = False): if not text: return - if self._output_filter: - text = await self._output_filter.filter( - self.get_utterances_list_with_timestamp(), text - ) - self._listener.activate() text = from_bot_to_user(text) self._utterances.append((time.time(), f"bot: {text}")) diff --git a/wafl/knowledge/single_file_knowledge.py b/wafl/knowledge/single_file_knowledge.py index 66589d5b..cebb4a85 100644 --- a/wafl/knowledge/single_file_knowledge.py +++ b/wafl/knowledge/single_file_knowledge.py @@ -11,7 +11,6 @@ from wafl.knowledge.utils import ( text_is_exact_string, get_first_cluster_of_rules, - filter_out_rules_that_are_too_dissimilar_to_query, ) from wafl.parsing.line_rules_parser import parse_rule_from_single_line from wafl.parsing.rules_parser import get_facts_and_rules_from_text @@ -301,7 +300,4 @@ async def _ask_for_rule_backward_with_scores( ][: self._max_rules_per_type] rules_and_scores = fact_rules + question_rules + incomplete_rules - rules_and_scores = filter_out_rules_that_are_too_dissimilar_to_query( - query, rules_and_scores - ) return rules_and_scores diff --git a/wafl/knowledge/utils.py b/wafl/knowledge/utils.py index 6928bb24..021a2b9e 100644 --- a/wafl/knowledge/utils.py +++ b/wafl/knowledge/utils.py @@ -42,20 +42,6 @@ def get_first_cluster_of_rules(rules_and_threshold): return rules -def filter_out_rules_that_are_too_dissimilar_to_query(query, rules_and_scores): - num_query_words = len(query.text.split()) - new_rules_and_scores = [] - for item in rules_and_scores: - rule = item[0] - num_rule_effect_words = len(rule.effect.text.split()) - if num_query_words < num_rule_effect_words / 3: - continue - - new_rules_and_scores.append(item) - - return new_rules_and_scores - - async def filter_out_rules_through_entailment(entailer, query, rules_and_scores): new_rules_and_scores = [] for rule, score in rules_and_scores: diff --git a/wafl/run.py b/wafl/run.py index b0397e84..92e9a8d7 100644 --- a/wafl/run.py +++ b/wafl/run.py @@ -1,9 +1,12 @@ import asyncio +import yaml + from wafl.config import Configuration from wafl.exceptions import CloseConversation from wafl.events.conversation_events import ConversationEvents from wafl.interface.command_line_interface import CommandLineInterface +from wafl.interface.dummy_interface import DummyInterface from 
wafl.logger.local_file_logger import LocalFileLogger from wafl.testcases import ConversationTestCases from wafl.variables import get_variables @@ -48,6 +51,26 @@ def run_testcases(): asyncio.run(testcases.run()) +def run_action(action_name): + print(f"Running the action {action_name}\n") + actions = yaml.safe_load(open("actions.yaml")) + if action_name not in actions: + raise ValueError(f"Action {action_name} not found in actions.yaml") + + actions_list = actions[action_name] + interface = DummyInterface(to_utter=actions_list.copy(), print_utterances=True) + config = Configuration.load_local_config() + conversation_events = ConversationEvents( + config=config, + interface=interface, + logger=_logger, + ) + for action in actions_list: + asyncio.run(conversation_events.process_next()) + + print(f"Action {action_name} finished.") + + def download_models(): import nltk diff --git a/wafl/templates/actions.yaml b/wafl/templates/actions.yaml new file mode 100644 index 00000000..7433d9ab --- /dev/null +++ b/wafl/templates/actions.yaml @@ -0,0 +1,3 @@ +action_1_summarise_guardian: + - summarise https://theguardian.com + - write the output to the file summary_guardian.txt From 26347f2809252c2ac65bd49c29ccd58ff6b0c8b8 Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Mon, 25 Dec 2023 13:35:19 +0000 Subject: [PATCH 02/11] resetting conversation memory --- todo.txt | 6 +++- wafl/answerer/dialogue_answerer.py | 32 +++++++++++++++---- wafl/command_line.py | 7 ++-- .../factories/whisper_connector_factory.py | 4 +-- wafl/events/conversation_events.py | 5 +++ wafl/scheduler/web_loop.py | 1 + 6 files changed, 43 insertions(+), 12 deletions(-) diff --git a/todo.txt b/todo.txt index 3d25c2ff..3dd3f1f8 100644 --- a/todo.txt +++ b/todo.txt @@ -1,6 +1,10 @@ ### TODO -* create actions from command line #### +* create actions from command line + * add condition of when to stop to the actions + +* BUG: the prior memory leaks even when re-loading the interface!!! + * clean single_file_knowledge: it still divides facts, question, and incomplete for rule retrieval. 
Use just one retriever and threshold for all diff --git a/wafl/answerer/dialogue_answerer.py b/wafl/answerer/dialogue_answerer.py index 93bae0e1..049e7115 100644 --- a/wafl/answerer/dialogue_answerer.py +++ b/wafl/answerer/dialogue_answerer.py @@ -53,12 +53,16 @@ async def answer(self, query_text): if rules_texts: last_timestamp = dialogue_items[-1][0] self._prior_rule_with_timestamp = (last_timestamp, rules_texts) - dialogue_items = self._insert_rule_into_dialogue_items(rules_texts, last_timestamp, dialogue_items) + dialogue_items = self._insert_rule_into_dialogue_items( + rules_texts, last_timestamp, dialogue_items + ) elif self._prior_rule_with_timestamp: last_timestamp = self._prior_rule_with_timestamp[0] rules_texts = self._prior_rule_with_timestamp[1] - dialogue_items = self._insert_rule_into_dialogue_items(rules_texts, last_timestamp, dialogue_items) + dialogue_items = self._insert_rule_into_dialogue_items( + rules_texts, last_timestamp, dialogue_items + ) last_bot_utterances = get_last_bot_utterances(dialogue_items, num_utterances=3) last_user_utterance = get_last_user_utterance(dialogue_items) @@ -118,11 +122,16 @@ async def _get_relevant_facts( query, is_from_user=True, knowledge_name="/", threshold=0.8 ) if facts_and_thresholds: - facts = [item[0].text for item in facts_and_thresholds if item[0].text not in memory] + facts = [ + item[0].text + for item in facts_and_thresholds + if item[0].text not in memory + ] self._prior_facts_with_timestamp.extend( (item, conversational_timestamp) for item in facts ) memory = "\n".join([item[0] for item in self._prior_facts_with_timestamp]) + await self._interface.add_fact(f"The bot remembers the facts:\n{memory}") else: if is_question(query.text) and not has_prior_rules: @@ -216,12 +225,23 @@ async def _run_code(self, to_execute): return result - def _insert_rule_into_dialogue_items(self, rules_texts, rule_timestamp, dialogue_items): + def _insert_rule_into_dialogue_items( + self, rules_texts, rule_timestamp, dialogue_items + ): new_dialogue_items = [] already_inserted = False for timestamp, utterance in dialogue_items: - if not already_inserted and utterance.startswith("user:") and rule_timestamp == timestamp: - new_dialogue_items.append((rule_timestamp, f"user: I want you to follow these rules:\n{rules_texts}")) + if ( + not already_inserted + and utterance.startswith("user:") + and rule_timestamp == timestamp + ): + new_dialogue_items.append( + ( + rule_timestamp, + f"user: I want you to follow these rules:\n{rules_texts}", + ) + ) already_inserted = True new_dialogue_items.append((timestamp, utterance)) diff --git a/wafl/command_line.py b/wafl/command_line.py index aeac726e..40d071a5 100644 --- a/wafl/command_line.py +++ b/wafl/command_line.py @@ -7,7 +7,8 @@ run_from_command_line, run_testcases, print_incipit, - download_models, run_action, + download_models, + run_action, ) from wafl.runners.run_from_audio import run_from_audio from wafl.runners.run_web_interface import run_app @@ -21,7 +22,9 @@ def print_help(): print("> wafl run-audio: Run a voice-powered version of the chatbot") print("> wafl run-server: Run a webserver version of the chatbot") print("> wafl run-tests: Run the tests in testcases.txt") - print("> wafl run-action : Run the action from actions.yaml") + print( + "> wafl run-action : Run the action from actions.yaml" + ) print() diff --git a/wafl/connectors/factories/whisper_connector_factory.py b/wafl/connectors/factories/whisper_connector_factory.py index 8304adc8..5b4e1c2f 100644 --- 
a/wafl/connectors/factories/whisper_connector_factory.py +++ b/wafl/connectors/factories/whisper_connector_factory.py @@ -4,6 +4,4 @@ class WhisperConnectorFactory: @staticmethod def get_connector(config): - return RemoteWhisperConnector( - config.get_value("listener_model") - ) + return RemoteWhisperConnector(config.get_value("listener_model")) diff --git a/wafl/events/conversation_events.py b/wafl/events/conversation_events.py index 95dedb3b..a63eef18 100644 --- a/wafl/events/conversation_events.py +++ b/wafl/events/conversation_events.py @@ -105,6 +105,11 @@ def is_computing(self): def reload_knowledge(self): self._knowledge = load_knowledge(self._config, self._logger) + def reset_discourse_memory(self): + self._answerer = create_answerer( + self._config, self._knowledge, self._interface, logger + ) + def _activation_word_in_text(self, activation_word, text): if f"[{normalized(activation_word)}]" in normalized(text): return True diff --git a/wafl/scheduler/web_loop.py b/wafl/scheduler/web_loop.py index 191bc5ae..f43a5a9e 100644 --- a/wafl/scheduler/web_loop.py +++ b/wafl/scheduler/web_loop.py @@ -47,6 +47,7 @@ async def reset_conversation(self): self._interface.deactivate() self._interface.activate() self._conversation_events.reload_knowledge() + self._conversation_events.reset_discourse_memory() await self._interface.output("Hello. How may I help you?") conversation = await self._get_conversation() return conversation From 8384b3288a5e592be64f91ed715f308abb8e8e0c Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Wed, 27 Dec 2023 13:34:36 +0000 Subject: [PATCH 03/11] updated docs --- README.md | 10 ++++++++++ documentation/source/installation.rst | 18 +++++++++++------- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 04672d88..a2c05a74 100644 --- a/README.md +++ b/README.md @@ -47,6 +47,16 @@ Please see the examples in the following chapters. ## LLM side (needs a GPU) The second part (LLM side) is a model server for the speech-to-text model, the LLM, the embedding system, and the text-to-speech model. + +#### Installation +In order to quickly run the LLM side, you can use the following installation commands: +```bash +pip install wafl-llm +wafl-llm start +``` +which will use the default models and start the server on port 8080. + +#### Docker A docker image can be used to run it as in the following: ```bash diff --git a/documentation/source/installation.rst b/documentation/source/installation.rst index 2eb6156d..362a1398 100644 --- a/documentation/source/installation.rst +++ b/documentation/source/installation.rst @@ -32,20 +32,24 @@ Please see the examples in the following chapters. LLM side (needs a GPU) ---------------------- +The second part (LLM side) is a model server for the speech-to-text model, the LLM, the embedding system, and the text-to-speech model. +In order to quickly run the LLM side, you can use the following installation commands: -The second part is a machine that runs on a machine accessible from the interface side. -The initial configuration is for a local deployment of language models. -No action is needed to run WAFL if you want to run it as a local instance. +.. code-block:: bash + + $ pip install wafl-llm + $ wafl-llm start + + which will use the default models and start the server on port 8080. -However, a multi-user setup will benefit for a dedicated server. -In this case, a docker image can be used +Alternatively, a Docker image can be used to run it as in the following: .. 
code-block:: bash - $ docker run -p8080:8080 --env NVIDIA_DISABLE_REQUIRE=1 --gpus all fractalego/wafl-llm:latest + $ docker run -p8080:8080 --env NVIDIA_DISABLE_REQUIRE=1 --gpus all fractalego/wafl-llm:0.80 The interface side has a `config.json` file that needs to be filled with the IP address of the LLM side. The default is localhost. -Alternatively, you can run the LLM side by cloning `this repository `_. +Finally, you can run the LLM side by cloning [this repository](https://github.com/fractalego/wafl-llm). From ba65a1a3f636291a6cd918cb6ad392e148b8c540 Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Wed, 27 Dec 2023 17:03:00 +0000 Subject: [PATCH 04/11] refactored a rule_creator --- wafl/answerer/dialogue_answerer.py | 28 ++++++++++------------------ wafl/answerer/rule_creator.py | 28 ++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 18 deletions(-) create mode 100644 wafl/answerer/rule_creator.py diff --git a/wafl/answerer/dialogue_answerer.py b/wafl/answerer/dialogue_answerer.py index 049e7115..2a4dc8f0 100644 --- a/wafl/answerer/dialogue_answerer.py +++ b/wafl/answerer/dialogue_answerer.py @@ -10,6 +10,7 @@ get_last_user_utterance, ) from wafl.answerer.base_answerer import BaseAnswerer +from wafl.answerer.rule_creator import RuleCreator from wafl.connectors.bridges.llm_chitchat_answer_bridge import LLMChitChatAnswerBridge from wafl.exceptions import CloseConversation from wafl.extractors.dataclasses import Query, Answer @@ -30,6 +31,13 @@ def __init__(self, config, knowledge, interface, code_path, logger): self._init_python_module(code_path.replace(".py", "")) self._prior_rule_with_timestamp = None self._max_predictions = 3 + self._rule_creator = RuleCreator( + knowledge, + config, + interface, + max_num_rules=1, + delete_current_rule=self._delete_current_rule, + ) async def answer(self, query_text): if self._logger: @@ -145,24 +153,8 @@ async def _get_relevant_facts( return memory - async def _get_relevant_rules(self, query, max_num_rules=1): - rules = await self._knowledge.ask_for_rule_backward( - query, - knowledge_name="/", - ) - rules = rules[:max_num_rules] - rules_texts = [] - for rule in rules: - rules_text = f"- If {rule.effect.text} go through the following points:\n" - for cause_index, causes in enumerate(rule.causes): - rules_text += f" {cause_index + 1}) {causes.text}\n" - - rules_text += f' {len(rule.causes) + 1}) After you completed all the steps output "{self._delete_current_rule}" and continue the conversation.\n' - - rules_texts.append(rules_text) - await self._interface.add_fact(f"The bot remembers the rule:\n{rules_text}") - - return "\n".join(rules_texts) + async def _get_relevant_rules(self, query): + return self._rule_creator.create_from_query(query) def _init_python_module(self, module_name): self._module = import_module(module_name) diff --git a/wafl/answerer/rule_creator.py b/wafl/answerer/rule_creator.py new file mode 100644 index 00000000..988148ea --- /dev/null +++ b/wafl/answerer/rule_creator.py @@ -0,0 +1,28 @@ +class RuleCreator: + def __init__( + self, knowledge, config, interface, max_num_rules, delete_current_rule + ): + self._knowledge = knowledge + self._config = config + self._interface = interface + self._max_num_rules = max_num_rules + self._delete_current_rule = delete_current_rule + + def create_from_query(self, query): + rules = await self._knowledge.ask_for_rule_backward( + query, + knowledge_name="/", + ) + rules = rules[: self._max_num_rules] + rules_texts = [] + for rule in rules: + rules_text = f"- If 
{rule.effect.text} go through the following points:\n" + for cause_index, causes in enumerate(rule.causes): + rules_text += f" {cause_index + 1}) {causes.text}\n" + + rules_text += f' {len(rule.causes) + 1}) After you completed all the steps output "{self._delete_current_rule}" and continue the conversation.\n' + + rules_texts.append(rules_text) + await self._interface.add_fact(f"The bot remembers the rule:\n{rules_text}") + + return "\n".join(rules_texts) From 320ddbd06d4ab9ccfc9b8929047be7ecc13bce4d Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Wed, 27 Dec 2023 17:21:27 +0000 Subject: [PATCH 05/11] refactored away knowledge_name --- wafl/answerer/dialogue_answerer.py | 2 +- wafl/answerer/rule_creator.py | 20 ++++++++--- wafl/facts.py | 1 - wafl/knowledge/base_knowledge.py | 11 +++--- wafl/knowledge/single_file_knowledge.py | 45 ++++++------------------- wafl/parsing/line_rules_parser.py | 4 +-- wafl/parsing/rules_parser.py | 2 +- 7 files changed, 34 insertions(+), 51 deletions(-) diff --git a/wafl/answerer/dialogue_answerer.py b/wafl/answerer/dialogue_answerer.py index 2a4dc8f0..286366b4 100644 --- a/wafl/answerer/dialogue_answerer.py +++ b/wafl/answerer/dialogue_answerer.py @@ -154,7 +154,7 @@ async def _get_relevant_facts( return memory async def _get_relevant_rules(self, query): - return self._rule_creator.create_from_query(query) + return await self._rule_creator.create_from_query(query) def _init_python_module(self, module_name): self._module = import_module(module_name) diff --git a/wafl/answerer/rule_creator.py b/wafl/answerer/rule_creator.py index 988148ea..94de19bc 100644 --- a/wafl/answerer/rule_creator.py +++ b/wafl/answerer/rule_creator.py @@ -1,14 +1,22 @@ class RuleCreator: def __init__( - self, knowledge, config, interface, max_num_rules, delete_current_rule + self, + knowledge, + config, + interface, + max_num_rules, + delete_current_rule, + max_recursion=1, ): self._knowledge = knowledge self._config = config self._interface = interface self._max_num_rules = max_num_rules self._delete_current_rule = delete_current_rule + self._max_indentation = max_recursion + self._indent_str = " " - def create_from_query(self, query): + async def create_from_query(self, query): rules = await self._knowledge.ask_for_rule_backward( query, knowledge_name="/", @@ -18,11 +26,15 @@ def create_from_query(self, query): for rule in rules: rules_text = f"- If {rule.effect.text} go through the following points:\n" for cause_index, causes in enumerate(rule.causes): - rules_text += f" {cause_index + 1}) {causes.text}\n" + rules_text += f"{self._indent_str}{cause_index + 1}) {causes.text}\n" + rules_text += await self.recursively_add_rules(causes.text) - rules_text += f' {len(rule.causes) + 1}) After you completed all the steps output "{self._delete_current_rule}" and continue the conversation.\n' + rules_text += f'{self._indent_str}{len(rule.causes) + 1}) After you completed all the steps output "{self._delete_current_rule}" and continue the conversation.\n' rules_texts.append(rules_text) await self._interface.add_fact(f"The bot remembers the rule:\n{rules_text}") return "\n".join(rules_texts) + + async def recursively_add_rules(self, query_text): + pass diff --git a/wafl/facts.py b/wafl/facts.py index 4c368210..23db2067 100644 --- a/wafl/facts.py +++ b/wafl/facts.py @@ -9,7 +9,6 @@ class Fact: is_interruption: bool = False source: str = None destination: str = None - knowledge_name: str = "/" def toJSON(self): return str(self) diff --git a/wafl/knowledge/base_knowledge.py 
b/wafl/knowledge/base_knowledge.py index 04250dd5..00a4d33b 100644 --- a/wafl/knowledge/base_knowledge.py +++ b/wafl/knowledge/base_knowledge.py @@ -4,19 +4,16 @@ class BaseKnowledge: async def add(self, text): raise NotImplementedError() - async def add_rule(self, rule_text, knowledge_name=None): + async def add_rule(self, rule_text): raise NotImplementedError() - async def ask_for_facts(self, query, is_from_user=False, knowledge_name=None): + async def ask_for_facts(self, query, is_from_user=False): raise NotImplementedError() async def ask_for_facts_with_threshold( - self, query, is_from_user=False, knowledge_name=None, threshold=None + self, query, is_from_user=False, threshold=None ): raise NotImplementedError() - async def ask_for_rule_backward(self, query, knowledge_name=None, first_n=None): - raise NotImplementedError() - - async def has_better_match(self, query_text: str) -> bool: + async def ask_for_rule_backward(self, query): raise NotImplementedError() diff --git a/wafl/knowledge/single_file_knowledge.py b/wafl/knowledge/single_file_knowledge.py index cebb4a85..9df9c261 100644 --- a/wafl/knowledge/single_file_knowledge.py +++ b/wafl/knowledge/single_file_knowledge.py @@ -6,7 +6,6 @@ from wafl.config import Configuration from wafl.facts import Fact -from wafl.simple_text_processing.normalize import normalized from wafl.knowledge.base_knowledge import BaseKnowledge from wafl.knowledge.utils import ( text_is_exact_string, @@ -14,7 +13,6 @@ ) from wafl.parsing.line_rules_parser import parse_rule_from_single_line from wafl.parsing.rules_parser import get_facts_and_rules_from_text -from wafl.extractors.dataclasses import Query from wafl.retriever.string_retriever import StringRetriever from wafl.retriever.dense_retriever import DenseRetriever from wafl.text_utils import clean_text_for_retrieval @@ -35,7 +33,7 @@ class SingleFileKnowledge(BaseKnowledge): _threshold_for_partial_facts = 0.6 _max_rules_per_type = 3 - def __init__(self, config, rules_text=None, knowledge_name=None, logger=None): + def __init__(self, config, rules_text=None, logger=None): self._logger = logger self._facts_dict = {} self._rules_dict = {} @@ -50,11 +48,8 @@ def __init__(self, config, rules_text=None, knowledge_name=None, logger=None): self._rules_fact_retriever = DenseRetriever("text_embedding_model", config) self._rules_question_retriever = DenseRetriever("text_embedding_model", config) self._rules_string_retriever = StringRetriever() - knowledge_name = knowledge_name if knowledge_name else self.root_knowledge if rules_text: - facts_and_rules = get_facts_and_rules_from_text( - rules_text, knowledge_name=knowledge_name - ) + facts_and_rules = get_facts_and_rules_from_text(rules_text) self._facts_dict = { f"F{index}": value for index, value in enumerate(facts_and_rules["facts"]) @@ -72,9 +67,9 @@ def __init__(self, config, rules_text=None, knowledge_name=None, logger=None): if not loop or not loop.is_running(): asyncio.run(self._initialize_retrievers()) - async def add(self, text, knowledge_name="/"): + async def add(self, text): fact_index = f"F{len(self._facts_dict)}" - self._facts_dict[fact_index] = Fact(text=text, knowledge_name=knowledge_name) + self._facts_dict[fact_index] = Fact(text=text) await self._facts_retriever.add_text_and_index( clean_text_for_retrieval(text), fact_index ) @@ -82,8 +77,8 @@ async def add(self, text, knowledge_name="/"): clean_text_for_retrieval(text), fact_index ) - async def add_rule(self, rule_text, knowledge_name=None): - rule = parse_rule_from_single_line(rule_text, 
knowledge_name) + async def add_rule(self, rule_text): + rule = parse_rule_from_single_line(rule_text) index = str(len(self._rules_dict)) index = f"R{index}" self._rules_dict[index] = rule @@ -91,21 +86,7 @@ async def add_rule(self, rule_text, knowledge_name=None): clean_text_for_retrieval(rule.effect.text), index=index ) - async def has_better_match(self, query_text: str) -> bool: - if any(normalized(query_text).find(item) == 0 for item in ["yes", "no"]): - return False - - if any(normalized(query_text).find(item) != -1 for item in [" yes ", " no "]): - return False - - rules = await self.ask_for_rule_backward( - Query(text=f"The user says to the bot: '{query_text}.'", is_question=False) - ) - return any(rule.effect.is_interruption for rule in rules) - - async def ask_for_facts( - self, query, is_from_user=False, knowledge_name=None, threshold=None - ): + async def ask_for_facts(self, query, is_from_user=False, threshold=None): if query.is_question: indices_and_scores = await self._facts_retriever_for_questions.get_indices_and_scores_from_text( query.text @@ -135,7 +116,7 @@ async def ask_for_facts( ] async def ask_for_facts_with_threshold( - self, query, is_from_user=False, knowledge_name=None, threshold=None + self, query, is_from_user=False, threshold=None ): if query.is_question: indices_and_scores = await self._facts_retriever_for_questions.get_indices_and_scores_from_text( @@ -167,10 +148,8 @@ async def ask_for_facts_with_threshold( if item[1] > threshold ] - async def ask_for_rule_backward(self, query, knowledge_name=None, first_n=None): - rules_and_scores = await self._ask_for_rule_backward_with_scores( - query, knowledge_name, first_n - ) + async def ask_for_rule_backward(self, query): + rules_and_scores = await self._ask_for_rule_backward_with_scores(query) return get_first_cluster_of_rules(rules_and_scores) def get_facts_and_rule_as_text(self): @@ -237,9 +216,7 @@ async def create_from_list( return knowledge - async def _ask_for_rule_backward_with_scores( - self, query, knowledge_name=None, first_n=None - ): + async def _ask_for_rule_backward_with_scores(self, query): if text_is_exact_string(query.text): indices_and_scores = ( await self._rules_string_retriever.get_indices_and_scores_from_text( diff --git a/wafl/parsing/line_rules_parser.py b/wafl/parsing/line_rules_parser.py index a24c1845..8dbdb77d 100644 --- a/wafl/parsing/line_rules_parser.py +++ b/wafl/parsing/line_rules_parser.py @@ -3,7 +3,7 @@ from wafl.rules import Rule -def parse_rule_from_single_line(text, knowledge_name=None): +def parse_rule_from_single_line(text): if ":-" not in text: return None @@ -12,13 +12,11 @@ def parse_rule_from_single_line(text, knowledge_name=None): effect = Fact( text=effect_text.strip(), is_question=is_question(effect_text), - knowledge_name=knowledge_name, ) causes = [ Fact( text=item.strip(), is_question=is_question(item), - knowledge_name=knowledge_name, ) for item in causes_text.split(";") ] diff --git a/wafl/parsing/rules_parser.py b/wafl/parsing/rules_parser.py index d77a08ff..ceb9fc57 100644 --- a/wafl/parsing/rules_parser.py +++ b/wafl/parsing/rules_parser.py @@ -5,7 +5,7 @@ from wafl.simple_text_processing.deixis import from_user_to_bot -def get_facts_and_rules_from_text(text: str, knowledge_name=None): +def get_facts_and_rules_from_text(text: str): parsed_text_dict = yaml.safe_load(text) fact_strings = parsed_text_dict.get("facts", []) rules_list = parsed_text_dict.get("rules", {}) From c44d1e7a5fbea4b5ff7dcb9b06082dd2348e06a0 Mon Sep 17 00:00:00 2001 From: Alberto Cetoli 
Date: Wed, 27 Dec 2023 17:30:35 +0000 Subject: [PATCH 06/11] refactored rules retrieval and added second order rules --- wafl/answerer/rule_creator.py | 14 +++- wafl/knowledge/base_knowledge.py | 2 +- wafl/knowledge/single_file_knowledge.py | 88 ++++++------------------- 3 files changed, 32 insertions(+), 72 deletions(-) diff --git a/wafl/answerer/rule_creator.py b/wafl/answerer/rule_creator.py index 94de19bc..b67f9b6c 100644 --- a/wafl/answerer/rule_creator.py +++ b/wafl/answerer/rule_creator.py @@ -36,5 +36,15 @@ async def create_from_query(self, query): return "\n".join(rules_texts) - async def recursively_add_rules(self, query_text): - pass + async def recursively_add_rules(self, query_text, depth=2): + rules = await self._knowledge.ask_for_rule_backward(query_text, threshold=0.95) + rules = rules[: self._max_num_rules] + rules_texts = [] + for rule in rules: + rules_text = f"- If {rule.effect.text} go through the following points:\n" + for cause_index, causes in enumerate(rule.causes): + indentation = self._indent_str * depth + rules_text += f"{indentation}{cause_index + 1}) {causes.text}\n" + rules_text += await self.recursively_add_rules(causes.text, depth + 1) + + rules_texts.append(rules_text) diff --git a/wafl/knowledge/base_knowledge.py b/wafl/knowledge/base_knowledge.py index 00a4d33b..0ad966cd 100644 --- a/wafl/knowledge/base_knowledge.py +++ b/wafl/knowledge/base_knowledge.py @@ -15,5 +15,5 @@ async def ask_for_facts_with_threshold( ): raise NotImplementedError() - async def ask_for_rule_backward(self, query): + async def ask_for_rule_backward(self, query, threshold=None): raise NotImplementedError() diff --git a/wafl/knowledge/single_file_knowledge.py b/wafl/knowledge/single_file_knowledge.py index 9df9c261..02805389 100644 --- a/wafl/knowledge/single_file_knowledge.py +++ b/wafl/knowledge/single_file_knowledge.py @@ -28,9 +28,7 @@ class SingleFileKnowledge(BaseKnowledge): _threshold_for_questions_from_bot = 0.6 _threshold_for_questions_in_rules = 0.49 _threshold_for_facts = 0.4 - _threshold_for_fact_rules = 0.6 - _threshold_for_fact_rules_for_creation = 0.6 - _threshold_for_partial_facts = 0.6 + _threshold_for_rules = 0.9 _max_rules_per_type = 3 def __init__(self, config, rules_text=None, logger=None): @@ -42,11 +40,7 @@ def __init__(self, config, rules_text=None, logger=None): "text_embedding_model", config, ) - self._rules_incomplete_retriever = DenseRetriever( - "text_embedding_model", config - ) - self._rules_fact_retriever = DenseRetriever("text_embedding_model", config) - self._rules_question_retriever = DenseRetriever("text_embedding_model", config) + self._rules_retriever = DenseRetriever("text_embedding_model", config) self._rules_string_retriever = StringRetriever() if rules_text: facts_and_rules = get_facts_and_rules_from_text(rules_text) @@ -82,7 +76,7 @@ async def add_rule(self, rule_text): index = str(len(self._rules_dict)) index = f"R{index}" self._rules_dict[index] = rule - await self._rules_fact_retriever.add_text_and_index( + await self._rules_retriever.add_text_and_index( clean_text_for_retrieval(rule.effect.text), index=index ) @@ -148,8 +142,10 @@ async def ask_for_facts_with_threshold( if item[1] > threshold ] - async def ask_for_rule_backward(self, query): - rules_and_scores = await self._ask_for_rule_backward_with_scores(query) + async def ask_for_rule_backward(self, query, threshold=None): + rules_and_scores = await self._ask_for_rule_backward_with_scores( + query, threshold=threshold + ) return get_first_cluster_of_rules(rules_and_scores) def 
get_facts_and_rule_as_text(self): @@ -179,21 +175,9 @@ async def _initialize_retrievers(self): if text_is_exact_string(rule.effect.text): continue - if "{" in rule.effect.text: - await self._rules_incomplete_retriever.add_text_and_index( - clean_text_for_retrieval(rule.effect.text), index - ) - continue - - elif rule.effect.is_question: - await self._rules_question_retriever.add_text_and_index( - clean_text_for_retrieval(rule.effect.text), index - ) - - else: - await self._rules_fact_retriever.add_text_and_index( - clean_text_for_retrieval(rule.effect.text), index - ) + await self._rules_retriever.add_text_and_index( + clean_text_for_retrieval(rule.effect.text), index + ) for index, rule in self._rules_dict.items(): if not text_is_exact_string(rule.effect.text): @@ -216,7 +200,7 @@ async def create_from_list( return knowledge - async def _ask_for_rule_backward_with_scores(self, query): + async def _ask_for_rule_backward_with_scores(self, query, threshold=None): if text_is_exact_string(query.text): indices_and_scores = ( await self._rules_string_retriever.get_indices_and_scores_from_text( @@ -226,55 +210,21 @@ async def _ask_for_rule_backward_with_scores(self, query): return [(self._rules_dict[item[0]], item[1]) for item in indices_and_scores] indices_and_scores = ( - await self._rules_fact_retriever.get_indices_and_scores_from_text( - query.text - ) + await self._rules_retriever.get_indices_and_scores_from_text(query.text) ) - if not first_n: - fact_rules = [ - (self._rules_dict[item[0]], item[1]) - for item in indices_and_scores - if item[1] > self._threshold_for_fact_rules - ] - else: - fact_rules = [ - (self._rules_dict[item[0]], item[1]) - for item in indices_and_scores - if item[1] > self._threshold_for_fact_rules_for_creation - ] - - fact_rules = [item for item in sorted(fact_rules, key=lambda x: -x[1])][ - : self._max_rules_per_type - ] + if threshold == None: + threshold = self._threshold_for_rules - indices_and_scores = ( - await self._rules_question_retriever.get_indices_and_scores_from_text( - query.text - ) - ) - question_rules = [ + rules = [ (self._rules_dict[item[0]], item[1]) for item in indices_and_scores - if item[1] > self._threshold_for_questions_in_rules - ] - question_rules = [item for item in sorted(question_rules, key=lambda x: -x[1])][ - : self._max_rules_per_type + if item[1] > threshold ] - indices_and_scores = ( - await self._rules_incomplete_retriever.get_indices_and_scores_from_text( - query.text - ) - ) - incomplete_rules = [ - (self._rules_dict[item[0]], item[1]) - for item in indices_and_scores - if item[1] > self._threshold_for_partial_facts + rules = [item for item in sorted(rules, key=lambda x: -x[1])][ + : self._max_rules_per_type ] - incomplete_rules = [ - item for item in sorted(incomplete_rules, key=lambda x: -x[1]) - ][: self._max_rules_per_type] - rules_and_scores = fact_rules + question_rules + incomplete_rules + rules_and_scores = rules return rules_and_scores From da441c3374ae4b44285668560eb76e43c515bbef Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Wed, 27 Dec 2023 17:41:54 +0000 Subject: [PATCH 07/11] changed retrieval thresholds --- todo.txt | 3 ++- wafl/answerer/dialogue_answerer.py | 2 +- wafl/answerer/rule_creator.py | 17 ++++++++--------- wafl/knowledge/single_file_knowledge.py | 4 ++-- wafl/rules.py | 1 - 5 files changed, 13 insertions(+), 14 deletions(-) diff --git a/todo.txt b/todo.txt index 3dd3f1f8..94f6c48b 100644 --- a/todo.txt +++ b/todo.txt @@ -1,7 +1,8 @@ ### TODO -* create actions from command line +/* create actions from 
command line * add condition of when to stop to the actions + * Perhaps the expectation pattern could be build in the rules themselves * BUG: the prior memory leaks even when re-loading the interface!!! diff --git a/wafl/answerer/dialogue_answerer.py b/wafl/answerer/dialogue_answerer.py index 286366b4..37618957 100644 --- a/wafl/answerer/dialogue_answerer.py +++ b/wafl/answerer/dialogue_answerer.py @@ -127,7 +127,7 @@ async def _get_relevant_facts( > conversational_timestamp - self._max_num_past_utterances_for_facts ] facts_and_thresholds = await self._knowledge.ask_for_facts_with_threshold( - query, is_from_user=True, knowledge_name="/", threshold=0.8 + query, is_from_user=True, threshold=0.8 ) if facts_and_thresholds: facts = [ diff --git a/wafl/answerer/rule_creator.py b/wafl/answerer/rule_creator.py index b67f9b6c..a1a49ba0 100644 --- a/wafl/answerer/rule_creator.py +++ b/wafl/answerer/rule_creator.py @@ -17,17 +17,14 @@ def __init__( self._indent_str = " " async def create_from_query(self, query): - rules = await self._knowledge.ask_for_rule_backward( - query, - knowledge_name="/", - ) + rules = await self._knowledge.ask_for_rule_backward(query) rules = rules[: self._max_num_rules] rules_texts = [] for rule in rules: rules_text = f"- If {rule.effect.text} go through the following points:\n" - for cause_index, causes in enumerate(rule.causes): - rules_text += f"{self._indent_str}{cause_index + 1}) {causes.text}\n" - rules_text += await self.recursively_add_rules(causes.text) + for cause_index, cause in enumerate(rule.causes): + rules_text += f"{self._indent_str}{cause_index + 1}) {cause.text}\n" + rules_text += await self.recursively_add_rules(cause) rules_text += f'{self._indent_str}{len(rule.causes) + 1}) After you completed all the steps output "{self._delete_current_rule}" and continue the conversation.\n' @@ -36,8 +33,8 @@ async def create_from_query(self, query): return "\n".join(rules_texts) - async def recursively_add_rules(self, query_text, depth=2): - rules = await self._knowledge.ask_for_rule_backward(query_text, threshold=0.95) + async def recursively_add_rules(self, query, depth=2): + rules = await self._knowledge.ask_for_rule_backward(query, threshold=0.95) rules = rules[: self._max_num_rules] rules_texts = [] for rule in rules: @@ -48,3 +45,5 @@ async def recursively_add_rules(self, query_text, depth=2): rules_text += await self.recursively_add_rules(causes.text, depth + 1) rules_texts.append(rules_text) + + return "\n".join(rules_texts) diff --git a/wafl/knowledge/single_file_knowledge.py b/wafl/knowledge/single_file_knowledge.py index 02805389..e2cc0621 100644 --- a/wafl/knowledge/single_file_knowledge.py +++ b/wafl/knowledge/single_file_knowledge.py @@ -27,8 +27,8 @@ class SingleFileKnowledge(BaseKnowledge): _threshold_for_questions_from_user = 0.55 _threshold_for_questions_from_bot = 0.6 _threshold_for_questions_in_rules = 0.49 - _threshold_for_facts = 0.4 - _threshold_for_rules = 0.9 + _threshold_for_facts = 0.7 + _threshold_for_rules = 0.85 _max_rules_per_type = 3 def __init__(self, config, rules_text=None, logger=None): diff --git a/wafl/rules.py b/wafl/rules.py index 76fb5a48..3e3d32f6 100644 --- a/wafl/rules.py +++ b/wafl/rules.py @@ -6,7 +6,6 @@ class Rule: effect: "Fact" causes: List["Fact"] - knowledge_name: str = "/" def toJSON(self): return str(self) From 713aab52003ab368639861c7bba35ad8abf454f8 Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Thu, 28 Dec 2023 11:34:58 +0000 Subject: [PATCH 08/11] updated testcases with LLM entailment --- 
tests/test_testcases.py | 40 +++++++++++++++++++++++++ todo.txt | 4 +-- wafl/answerer/dialogue_answerer.py | 3 +- wafl/answerer/entailer.py | 47 ++++++++++++++++++++++++++++++ wafl/retriever/dense_retriever.py | 3 -- wafl/testcases.py | 32 ++++++++++---------- 6 files changed, 106 insertions(+), 23 deletions(-) create mode 100644 tests/test_testcases.py create mode 100644 wafl/answerer/entailer.py diff --git a/tests/test_testcases.py b/tests/test_testcases.py new file mode 100644 index 00000000..3a5fd0c7 --- /dev/null +++ b/tests/test_testcases.py @@ -0,0 +1,40 @@ +import asyncio +from unittest import TestCase + +from wafl.config import Configuration +from wafl.testcases import ConversationTestCases + +_wafl_example = """ +rules: + - the user says "hello": + - You must answer the user by writing "Hello. What is your name" + + - the user says their name: + - You must answer the user by writing "nice to meet you, NAME_OF_THE_USER!" +""".strip() + + +_test_case_greetings = """ + +test the greetings work + user: Hello + bot: Hello there! What is your name + user: Bob + bot: Nice to meet you, bob! + +! test the greetings uses the correct name + user: Hello + bot: Hello there! What is your name + user: Bob + bot: Nice to meet you, unknown! + + +""".strip() + + +class TestConversationalTestCases(TestCase): + def test_conversation_testcase_run_all(self): + config = Configuration.load_local_config() + config.set_value("rules", _wafl_example) + testcase = ConversationTestCases(config=config, text=_test_case_greetings) + self.assertTrue(asyncio.run(testcase.run())) diff --git a/todo.txt b/todo.txt index 94f6c48b..673a7c63 100644 --- a/todo.txt +++ b/todo.txt @@ -1,10 +1,10 @@ ### TODO /* create actions from command line - * add condition of when to stop to the actions + * add condition of when to stop to the actions #### EXPECTATION PATTERN * Perhaps the expectation pattern could be build in the rules themselves -* BUG: the prior memory leaks even when re-loading the interface!!! +/* BUG: the prior memory leaks even when re-loading the interface!!! * clean single_file_knowledge: it still divides facts, question, and incomplete for rule retrieval. 
Use just one retriever and threshold for all diff --git a/wafl/answerer/dialogue_answerer.py b/wafl/answerer/dialogue_answerer.py index 37618957..7fe8c68c 100644 --- a/wafl/answerer/dialogue_answerer.py +++ b/wafl/answerer/dialogue_answerer.py @@ -43,9 +43,8 @@ async def answer(self, query_text): if self._logger: self._logger.write(f"Dialogue Answerer: the query is {query_text}") - query = Query.create_from_text(query_text) + query = Query.create_from_text("The user says: " + query_text) rules_texts = await self._get_relevant_rules(query) - dialogue = self._interface.get_utterances_list_with_timestamp()[ -self._max_num_past_utterances : ] diff --git a/wafl/answerer/entailer.py b/wafl/answerer/entailer.py new file mode 100644 index 00000000..1bf974ac --- /dev/null +++ b/wafl/answerer/entailer.py @@ -0,0 +1,47 @@ +import os + +from wafl.connectors.factories.llm_connector_factory import LLMConnectorFactory + +_path = os.path.dirname(__file__) + + +class Entailer: + def __init__(self, config): + self._connector = LLMConnectorFactory.get_connector(config) + self._config = config + + async def left_entails_right(self, lhs: str, rhs: str, dialogue: str) -> str: + prompt = await self._get_answer_prompt(lhs, rhs, dialogue) + result = await self._connector.generate(prompt) + result = self._clean_result(result) + print(result) + return result == "yes" + + async def _get_answer_prompt(self, lhs, rhs, dialogue): + prompt = f""" + +This is a conversation between two agents ("bot" and "user"): +bot: what can I do for you? + +Given this dialogue, the task is to determine whether the following two utterances have the same meaning: +1) user: I need to book a flight to Paris. +2) user: I'd like to buy a plane ticket to paris. +Please answer "yes" or "no": yes + + + +This is a conversation between two agents ("bot" and "user"): +{dialogue} + +Given this dialogue, the task is to determine whether the following two utterances have the same meaning: +1) {lhs.lower()} +2) {rhs.lower()} +Please answer "yes" or "no": + """.strip() + return prompt + + def _clean_result(self, result): + result = result.replace("", "") + result = result.split("\n")[0] + result = result.strip() + return result.lower() diff --git a/wafl/retriever/dense_retriever.py b/wafl/retriever/dense_retriever.py index d6285267..4bf3bb8c 100644 --- a/wafl/retriever/dense_retriever.py +++ b/wafl/retriever/dense_retriever.py @@ -28,9 +28,6 @@ async def add_text_and_index(self, text: str, index: str): async def get_indices_and_scores_from_text( self, text: str ) -> List[Tuple[str, float]]: - if not text or len(text) < self._threshold_length: - return [] - embeddings = await self._get_embeddings_from_text(text) return self._embeddings_model.similar_by_vector(embeddings, topn=5) diff --git a/wafl/testcases.py b/wafl/testcases.py index 19ee3f84..017ad0f8 100644 --- a/wafl/testcases.py +++ b/wafl/testcases.py @@ -1,5 +1,4 @@ -from fuzzywuzzy import fuzz -from wafl.events.utils import load_knowledge +from wafl.answerer.entailer import Entailer from wafl.simple_text_processing.deixis import from_user_to_bot, from_bot_to_user from wafl.exceptions import CloseConversation from wafl.events.conversation_events import ConversationEvents @@ -13,10 +12,10 @@ class ConversationTestCases: GREEN_COLOR_START = "\033[32m" COLOR_END = "\033[0m" - def __init__(self, config, text, code_path=None, logger=None): + def __init__(self, config, text, logger=None): + self._config = config self._testcase_data = get_user_and_bot_lines_from_text(text) - self._knowledge = 
load_knowledge(config, logger) - self._code_path = code_path if code_path else "/" + self._entailer = Entailer(config) async def test_single_case(self, name): if name not in self._testcase_data: @@ -26,12 +25,10 @@ async def test_single_case(self, name): test_lines = self._testcase_data[name]["lines"] is_negated = self._testcase_data[name]["negated"] interface = DummyInterface(user_lines) - conversation_events = ConversationEvents( - self._knowledge, interface=interface, code_path=self._code_path - ) + conversation_events = ConversationEvents(self._config, interface=interface) + await conversation_events._knowledge._initialize_retrievers() print(self.BLUE_COLOR_START + f"\nRunning test '{name}'." + self.COLOR_END) - continue_conversations = True while continue_conversations: try: @@ -42,14 +39,18 @@ async def test_single_case(self, name): is_consistent = True generated_lines = interface.get_utterances_list() + prior_dialogue = [] for test_line, generated_line in zip(test_lines, generated_lines): - test_line = self._apply_deixis(test_line) - if not await self._lhs_is_similar_to(generated_line, test_line): + if not await self._lhs_is_similar_to( + generated_line, test_line, prior_dialogue + ): print(f" [test_line] {test_line}") print(f" [predicted_line] {generated_line}") is_consistent = False break + prior_dialogue.append(generated_line) + if (is_consistent and not is_negated) or (not is_consistent and is_negated): print(self.GREEN_COLOR_START + " [Success]\n\n" + self.COLOR_END) return True @@ -63,7 +64,6 @@ async def test_single_case(self, name): async def run(self): to_return = True - for name in self._testcase_data: result = await self.test_single_case(name) if not result: @@ -71,15 +71,15 @@ async def run(self): return to_return - async def _lhs_is_similar_to(self, lhs, rhs): + async def _lhs_is_similar_to(self, lhs, rhs, prior_dialogue): lhs_name = lhs.split(":")[0].strip() rhs_name = rhs.split(":")[0].strip() if lhs_name != rhs_name: return False - lhs = ":".join(item.strip() for item in lhs.split(":")[1:]) - rhs = ":".join(item.strip() for item in rhs.split(":")[1:]) - return fuzz.ratio(lhs, rhs) > 80 + return await self._entailer.left_entails_right( + lhs, rhs, "\n".join(prior_dialogue) + ) def _apply_deixis(self, line): name = line.split(":")[0].strip() From ee310ce0598a87b811ae18f87b9a3d7bea23973b Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Thu, 28 Dec 2023 16:27:17 +0000 Subject: [PATCH 09/11] actions have expected behavior --- todo.txt | 11 ++++- wafl/answerer/entailer.py | 1 - wafl/command_line.py | 4 +- wafl/events/conversation_events.py | 1 + wafl/run.py | 24 +---------- wafl/runners/run_from_actions.py | 68 ++++++++++++++++++++++++++++++ wafl/templates/actions.yaml | 8 +++- wafl/templates/functions.py | 2 - 8 files changed, 87 insertions(+), 32 deletions(-) create mode 100644 wafl/runners/run_from_actions.py diff --git a/todo.txt b/todo.txt index 673a7c63..d0d39bbb 100644 --- a/todo.txt +++ b/todo.txt @@ -1,8 +1,15 @@ ### TODO /* create actions from command line - * add condition of when to stop to the actions #### EXPECTATION PATTERN - * Perhaps the expectation pattern could be build in the rules themselves + /* add condition of when to stop to the actions + +#### Find way to delete cache in remote llm connector +#### Put colors in action output (and dummy interface) +#### Add green for when an expectation is matched +#### write docs about actions +#### push new version to main + +* Perhaps the expectation pattern could be build in the rules themselves /* BUG: the 
prior memory leaks even when re-loading the interface!!! diff --git a/wafl/answerer/entailer.py b/wafl/answerer/entailer.py index 1bf974ac..0daad8f3 100644 --- a/wafl/answerer/entailer.py +++ b/wafl/answerer/entailer.py @@ -14,7 +14,6 @@ async def left_entails_right(self, lhs: str, rhs: str, dialogue: str) -> str: prompt = await self._get_answer_prompt(lhs, rhs, dialogue) result = await self._connector.generate(prompt) result = self._clean_result(result) - print(result) return result == "yes" async def _get_answer_prompt(self, lhs, rhs, dialogue): diff --git a/wafl/command_line.py b/wafl/command_line.py index 40d071a5..14fbb6cd 100644 --- a/wafl/command_line.py +++ b/wafl/command_line.py @@ -7,9 +7,9 @@ run_from_command_line, run_testcases, print_incipit, - download_models, - run_action, + download_models ) +from wafl.runners.run_from_actions import run_action from wafl.runners.run_from_audio import run_from_audio from wafl.runners.run_web_interface import run_app diff --git a/wafl/events/conversation_events.py b/wafl/events/conversation_events.py index a63eef18..cf38856b 100644 --- a/wafl/events/conversation_events.py +++ b/wafl/events/conversation_events.py @@ -21,6 +21,7 @@ def __init__( self._config = config self._knowledge = load_knowledge(config, logger) self._answerer = create_answerer(config, self._knowledge, interface, logger) + self._answerer._bridge._connector._cache = {} self._interface = interface self._logger = logger self._is_computing = False diff --git a/wafl/run.py b/wafl/run.py index 92e9a8d7..4f664ecb 100644 --- a/wafl/run.py +++ b/wafl/run.py @@ -1,12 +1,9 @@ import asyncio -import yaml - from wafl.config import Configuration from wafl.exceptions import CloseConversation from wafl.events.conversation_events import ConversationEvents from wafl.interface.command_line_interface import CommandLineInterface -from wafl.interface.dummy_interface import DummyInterface from wafl.logger.local_file_logger import LocalFileLogger from wafl.testcases import ConversationTestCases from wafl.variables import get_variables @@ -51,27 +48,8 @@ def run_testcases(): asyncio.run(testcases.run()) -def run_action(action_name): - print(f"Running the action {action_name}\n") - actions = yaml.safe_load(open("actions.yaml")) - if action_name not in actions: - raise ValueError(f"Action {action_name} not found in actions.yaml") - - actions_list = actions[action_name] - interface = DummyInterface(to_utter=actions_list.copy(), print_utterances=True) - config = Configuration.load_local_config() - conversation_events = ConversationEvents( - config=config, - interface=interface, - logger=_logger, - ) - for action in actions_list: - asyncio.run(conversation_events.process_next()) - - print(f"Action {action_name} finished.") - - def download_models(): import nltk nltk.download("averaged_perceptron_tagger") + diff --git a/wafl/runners/run_from_actions.py b/wafl/runners/run_from_actions.py new file mode 100644 index 00000000..bf0468b4 --- /dev/null +++ b/wafl/runners/run_from_actions.py @@ -0,0 +1,68 @@ +import asyncio + +import yaml + +from wafl.answerer.entailer import Entailer +from wafl.config import Configuration +from wafl.events.conversation_events import ConversationEvents +from wafl.interface.dummy_interface import DummyInterface +from wafl.logger.local_file_logger import LocalFileLogger + +_logger = LocalFileLogger() + + +def get_action_list_and_expeted_list_from_yaml(filename, action_name): + actions = yaml.safe_load(open("actions.yaml")) + if action_name not in actions: + raise 
ValueError(f"Action {action_name} not found in actions.yaml") + + actions_list = [item["action"] for item in actions[action_name]] + expected_list = [item["expected"] for item in actions[action_name]] + return actions_list, expected_list + + +def predict_action(config, actions_list, expected_list): + interface = DummyInterface(to_utter=actions_list.copy(), print_utterances=True) + conversation_events = ConversationEvents( + config=config, + interface=interface, + logger=_logger, + ) + entailer = Entailer(config) + for expected in expected_list: + asyncio.run(conversation_events.process_next()) + last_utterance = interface.get_utterances_list()[-1] + if expected and not asyncio.run( + entailer.left_entails_right( + last_utterance, + expected, + "\n".join(interface.get_utterances_list()[:-1]), + ) + ): + del entailer, conversation_events, interface + raise ValueError( + f"The utterance '{last_utterance}' does not entail '{expected}'." + ) + + +def run_action(action_name): + print(f"Running the action {action_name}\n") + actions_list, expected_list = get_action_list_and_expeted_list_from_yaml( + "actions.yaml", action_name + ) + config = Configuration.load_local_config() + num_retries = 10 + for _ in range(num_retries): + try: + predict_action(config, actions_list, expected_list) + break + + except (ValueError, SyntaxError) as e: + print(e) + print(f"Retrying the action {action_name}...") + + print(f"Action {action_name} finished.") + + +if __name__ == "__main__": + run_action("action_1_summarise_guardian") diff --git a/wafl/templates/actions.yaml b/wafl/templates/actions.yaml index 7433d9ab..f8d5133f 100644 --- a/wafl/templates/actions.yaml +++ b/wafl/templates/actions.yaml @@ -1,3 +1,7 @@ action_1_summarise_guardian: - - summarise https://theguardian.com - - write the output to the file summary_guardian.txt + - + action: get the news from the guardian website + expected: the bot outputs a list of headlines + - + action: write the bullet points above to the file summary_1.txt + expected: the bot saves the headlines to the file summary_1.txt \ No newline at end of file diff --git a/wafl/templates/functions.py b/wafl/templates/functions.py index 4936c99b..f30833c3 100644 --- a/wafl/templates/functions.py +++ b/wafl/templates/functions.py @@ -90,7 +90,6 @@ def add_to_shopping_list(list_of_items_to_add): db["shopping_list"].append(item) json.dump(db, open(_db_filename, "w")) - return "Item added" @@ -101,7 +100,6 @@ def remove_shopping_list(list_of_items_to_remove): db["shopping_list"].remove(item) json.dump(db, open(_db_filename, "w")) - return "Item removed" From b06cf2df2f5246a30167d9e2ff581312f9cf3ece Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Thu, 28 Dec 2023 19:41:37 +0000 Subject: [PATCH 10/11] added colors to actions --- wafl/interface/dummy_interface.py | 7 +++++-- wafl/runners/run_from_actions.py | 22 ++++++++++++++++++---- wafl/templates/actions.yaml | 4 ++-- wafl/templates/functions.py | 31 ++++++++++++++++++++++++++----- 4 files changed, 51 insertions(+), 13 deletions(-) diff --git a/wafl/interface/dummy_interface.py b/wafl/interface/dummy_interface.py index 3a104957..e651b2e1 100644 --- a/wafl/interface/dummy_interface.py +++ b/wafl/interface/dummy_interface.py @@ -5,6 +5,9 @@ from wafl.interface.base_interface import BaseInterface from wafl.interface.utils import not_good_enough +COLOR_START = "\033[94m" +COLOR_END = "\033[0m" + class DummyInterface(BaseInterface): def __init__(self, to_utter=None, print_utterances=False): @@ -20,7 +23,7 @@ async def output(self, text: 
str, silent: bool = False): print(text) else: - print("bot> " + text) + print(COLOR_START + "bot> " + text + COLOR_END) if not silent: self._dialogue += "bot: " + text + "\n" @@ -31,7 +34,7 @@ async def input(self) -> str: text = self._to_utter.pop(0).strip() text = self.__remove_activation_word_and_normalize(text) if self._print_utterances: - print("user> " + text) + print(COLOR_START + "user> " + text + COLOR_END) while self._is_listening and not_good_enough(text): await self.output("I did not quite understand that") diff --git a/wafl/runners/run_from_actions.py b/wafl/runners/run_from_actions.py index bf0468b4..c4868874 100644 --- a/wafl/runners/run_from_actions.py +++ b/wafl/runners/run_from_actions.py @@ -10,6 +10,10 @@ _logger = LocalFileLogger() +COLOR_YELLOW = "\033[93m" +COLOR_GREEN = "\033[92m" +COLOR_END = "\033[0m" + def get_action_list_and_expeted_list_from_yaml(filename, action_name): actions = yaml.safe_load(open("actions.yaml")) @@ -32,6 +36,10 @@ def predict_action(config, actions_list, expected_list): for expected in expected_list: asyncio.run(conversation_events.process_next()) last_utterance = interface.get_utterances_list()[-1] + + if not last_utterance: + raise ValueError("The agent did not say anything.") + if expected and not asyncio.run( entailer.left_entails_right( last_utterance, @@ -46,22 +54,28 @@ def predict_action(config, actions_list, expected_list): def run_action(action_name): - print(f"Running the action {action_name}\n") + print(COLOR_GREEN + f"Running the action {action_name}\n" + COLOR_END) actions_list, expected_list = get_action_list_and_expeted_list_from_yaml( "actions.yaml", action_name ) config = Configuration.load_local_config() num_retries = 10 + success = False for _ in range(num_retries): try: predict_action(config, actions_list, expected_list) + success = True break except (ValueError, SyntaxError) as e: - print(e) - print(f"Retrying the action {action_name}...") + print(COLOR_YELLOW + str(e) + COLOR_END) + print(COLOR_GREEN + f"Retrying the action {action_name}..." + COLOR_END) + + if success: + print(COLOR_GREEN + f"Action {action_name} finished." + COLOR_END) - print(f"Action {action_name} finished.") + else: + print(COLOR_YELLOW + f"Action {action_name} failed." 
+ COLOR_END) if __name__ == "__main__": diff --git a/wafl/templates/actions.yaml b/wafl/templates/actions.yaml index f8d5133f..8edeb6a8 100644 --- a/wafl/templates/actions.yaml +++ b/wafl/templates/actions.yaml @@ -3,5 +3,5 @@ action_1_summarise_guardian: action: get the news from the guardian website expected: the bot outputs a list of headlines - - action: write the bullet points above to the file summary_1.txt - expected: the bot saves the headlines to the file summary_1.txt \ No newline at end of file + action: write the list of news headlines above as a text into the file summary_news.txt + expected: \ No newline at end of file diff --git a/wafl/templates/functions.py b/wafl/templates/functions.py index f30833c3..0d001f89 100644 --- a/wafl/templates/functions.py +++ b/wafl/templates/functions.py @@ -1,7 +1,8 @@ import json +import html2text +import re import requests -from bs4 import BeautifulSoup from datetime import datetime, timedelta from wafl.exceptions import CloseConversation @@ -65,10 +66,21 @@ def check_weather_lat_long(latitude, longitude, day): def get_website(url): - response = requests.get(url) - soup = BeautifulSoup(response.content, "html.parser") - text = soup.get_text() - return text + text = requests.get(url).content.decode("utf-8") + h = html2text.HTML2Text() + h.ignore_links = True + return h.handle(text).strip()[:1000] + + +def get_guardian_headlines(): + url = "https://www.theguardian.com/uk" + text = requests.get(url).content.decode("utf-8") + pattern = re.compile(r"

(.*?)

", re.MULTILINE) + matches = pattern.findall(text) + text = "-" + "\n-".join(matches) + h = html2text.HTML2Text() + h.ignore_links = True + return h.handle(text).strip() def get_time(): @@ -90,6 +102,7 @@ def add_to_shopping_list(list_of_items_to_add): db["shopping_list"].append(item) json.dump(db, open(_db_filename, "w")) + return "Item added" @@ -100,6 +113,7 @@ def remove_shopping_list(list_of_items_to_remove): db["shopping_list"].remove(item) json.dump(db, open(_db_filename, "w")) + return "Item removed" @@ -109,3 +123,10 @@ def get_shopping_list(): return "nothing" return ", ".join(db["shopping_list"]) + + +def write_to_file(filename, text): + with open(filename, "w") as file: + file.write(text) + + return f"File {filename} saved" From d73729c3b292cd49711094488ad0939910d304d8 Mon Sep 17 00:00:00 2001 From: Alberto Cetoli Date: Fri, 29 Dec 2023 15:36:05 +0000 Subject: [PATCH 11/11] wrote docs for actions and testcases --- .../build/doctrees/environment.pickle | Bin 98168 -> 112416 bytes documentation/build/doctrees/index.doctree | Bin 5136 -> 5168 bytes .../build/doctrees/installation.doctree | Bin 7611 -> 7679 bytes .../build/html/_sources/index.rst.txt | 2 + .../build/html/_sources/installation.rst.txt | 18 ++++--- documentation/build/html/examples.html | 2 + documentation/build/html/genindex.html | 2 + documentation/build/html/index.html | 8 +++ documentation/build/html/installation.html | 22 +++++--- documentation/build/html/introduction.html | 2 + documentation/build/html/license.html | 6 ++- documentation/build/html/objects.inv | Bin 460 -> 494 bytes documentation/build/html/running_WAFL.html | 2 + documentation/build/html/search.html | 2 + documentation/build/html/searchindex.js | 2 +- documentation/source/actions.rst | 42 +++++++++++++++ documentation/source/index.rst | 2 + documentation/source/testcases.rst | 49 ++++++++++++++++++ wafl/variables.py | 2 +- 19 files changed, 144 insertions(+), 19 deletions(-) create mode 100644 documentation/source/actions.rst create mode 100644 documentation/source/testcases.rst diff --git a/documentation/build/doctrees/environment.pickle b/documentation/build/doctrees/environment.pickle index 7b5e4febd17118c4c42bdac68c0069c3541a94f5..441ad322c078802198d85fcfc9799814929e2fc9 100644 GIT binary patch literal 112416 zcmeIb3zS?}c_u3B)vcCVPfLD>a@)o&qizZDGQzL69#)U!mT1XX2AfiK)#w4 znR~zQ-}^lFIaSr&r>a`EmV~M5I(wh}`1k+*|GoFK8$SG*HP@}-KX<27%@@j-$F1_E zLakCRS><}u-TrW+P|O_-J$j+J_npmWnxpQzOtspaa))x2Y`tb#xk9b!KDa+$DOvk7 z#jLe|zEZmo`sviN`{y&W#r>$?VB?v3p;F#I!}j@%Z8%MLBkD<0uM54qyr!H=qn5SM zM7C0%EzC9DVLc=7_r}Ajz@NgV$|b8-E9B7eGfj7cW7QinCF^qCDmxgS(|oGw4peIu z+sXnO>mQ#ucCzVi%2uiiwZdE;kFGy>;NYwFr6!6qR;^x19k%L~VgX~wH0t>ZAhI=D zvbAoNs>Mv*a?;gIJa5=9u3PQJ5*^{0X6M3W@ihRo9^ap zrC0A?r^zL!n&kqE45Ol zE;uycsI~xuGwQF@T|@#@!;1j@vBKQAJlast%oOot@GQUO(NMOMv&LqMmFxw)f3o1z z3*|YMG~DZDB2^0=6$|AHPPzabXu9kE&v(5OtzXI%8?xl1`Q7?6|Ib8`42YKE4j>q|8tZ4 zOwX3;%_;T9U@=pkYh>myxwRG=k`ZjitTII_4N_1zsD3-mj4HoTK`8CLE6ZBW5Igp~ ztyZbjpBaCfSN6=?0RLxn!>Kt7SarFc&J>H4`Sfh#>8BUc^;)Lv04HTt7u^9}hf{Tj zN*M=emf-KXrSGg$>lKkvS1<*z6OSpw;ZhMbGlVl zC{=O|0FiL<3wG8k*>q)w1f9l9wL(zzvUpM2$pcENeIjPAQpyy(MY;`dmw?Wddbv`! 
z(lu)q#8A#!X{VY2BU)4=*&g3&}nBLT%}0P;;$@lOanlJ zeMeBIQdLW)7dL-P{#J#S+dPJno&kFUYE7x8nx4W(+dp@zd==73J#qsIKj z$>Su#iq_g02$Vq)++coy6LbMUAN)}mz12|d-KMZJ4RM#xI0Xj+t%!l=D@Di*ANPhu zFwKBt3irzI$sdvBl*b-(edM7*(NWg8>-k*qC8?n#V*cy-uk)>)!LxLTHl%Dwl@;os zHuNITHp??Ds=@r_p&F4lamib=)G8Q@>Z?sRo9Z$*9agPq?v8fHgt<}y)awA z3+_Ope4$*K2lohd?qo%#=f9&K4$51NLavD?vMg%cS!ycHz2Qf5sAYMN^yq4qFW*&F zHHvD5WTyqc6y)#9zg+dGVCLx@MkQB)0$fp}`8w680vjJgUD9;yS9KsW0pVxD{exqq zLFJ%Mj}Wexd6qKzd3wH(BOR?#Z>iY`-vl+2?-t6C<7ieOr;35~X(f)HQ;)Yxn^5Mm zwSq84!cbmNB~Ssthh`de=w)8X4tZz3R-o=cc>w`B1jViJG%9C`s>6ILf4kfVs!TA?-9;5Ll`@oI%}cjSJ1B>Infn7=zeDy8|o`FqqKBl$a2{cTbo+_jc5=eJum4OJlBsOF%(Li&mdBi$&y z(E&crWI);wNAg&`1VIewR?3+onj{K%#k-_U#rL_Ag{&|uc~uQ0P~d=Oe4f8Af4^#5 zX|G`%koPnUtHnZHBgdkuuL`KorL~=)TGXnd;F>C+!l=7*_s9@~=e{1ie!LEyRyD2W z!Jz>2%Z`<)W%JbcLIWaMdGG3_Q3GOUkpk763@A`P^8pd>eU9dAx~M`Aj+Dao`q{!sd0xMBSN#|J! zYsXXiWz~TmtItz42=q(^U}1hOZL4m*S!N-wLA6IjjO$gQf}B%)1;Yv!O-+>|g{pwJ z5Vs2GXflY!z&EUN#nZp?KU6bPy^1V`Do5j{=E+oP_SI?f3Ov+k1+}A|x5385mUG{b zuh*;2q5b=_x$?N}q_%N%H6e3j>2kqa&Oa8!}nj#6&y(01rg3`pL`1|J>^yM8v-^^+t91wy?YR< zUaRM?iaz%&35^;+inmY*Z}*D2q|1ii&BC5BKdCiivkW( z&(!dgN{6a*gIA|W*5uhSG`w#*_Sngjsi}!akEZTB`sl>u$%iJ69GO0P=F9_4)${J) zRb_v{3eS{a(Vv`{dgQT*NATXslMfXO@j!Ijq-1G2kG}1XEo1*5c3LbgaXxoys5+Lk*0=JQ<|B}qwVSu+%CXOVU~b`l-#09 z$b!^gCD=<~`Nc-53_yp+#yXe5Y@rgX=QpW0>XmBIx?~l>S41_W(Ke-OT(4@x7t` zDi`n&x&pj2#Y(MKnRkHj^3G=cj%Zr;KZd_cJ=fz?;ygrd@{)$ODeI*~4;`zV1=fi? z({F$YL7mH92D;Y!Z;GEB(^zt8+<2)8rCD|V{6b7)mGAh^nFwCyk=d5 zccY%pTNzQbJ;?3|S5bB^>=t=DR8%jNEXV_I?L#GW!clQQu_5+SrksV=La`vt-5B(w zO9FtnYu|dihvT;f^=hCZu;dcNwl{)D9;$s=w0_m-CQyi0Ah6(|s`aEQxFkfOA|P(g zoVd=XpJ7%Huu@~)&3szQT!1iEXif(U{;EKN>MFY&pnlO(g4WY$>=7c3$fX^i<3f41 zLdS>rF+D0*bew>X||j#M$s5bE8+9tySPF@ZBB$y0eSA z2ia_%s-Qb?&;4U>A9=-j&98^0O4*txU{JiiQHD1`@$y|#M!$`(QTWCyMUG~(-j+=la%p%`Me9d*KoY6DlRi?&_f}=wvuUOOUa%H`@;O;vcL09G zxpIaRlD{gm7&~|l5O9FKGCWKOGJ&if^Sh)a`&BJuQrV0cn=z;gjZi4r= zP));KSI*Ffo1V#JFTh3U(}ID9%}z=%t}@;cl?XW1t?t+KoXq z-JRz~aIbr1P;F~aO&Ldi1EGE{%mAg}M5Oay7|mGF!1ka4R1>dLt0rnziOxZ@{D$CN zCM~rH=Nr3nL>Gg zzR;)_ioq)(NcrumcXs&icm=^*sh}fh!i*S@s1Zzo5n2b;LJjf3upP*xkTY;X(dQ}; z1uu6{7?@Su0ZJc;0L?kY&XFoDOue9x0?)IOJhl5GR1A_v(7O^e+jW6>uf?IP-`JuH zL^T0kmSJ08^6af}S=@xMp&G!~N}6?Q1&}gDTB9-J4lA8s0!ARF6W6)Bj3J!LqXW>Zq)%cU!o11)}b;l#Fih-Z(>-$9R#eDAO3?-*R~emaxH*mJgqDQcT25N zrcXMpBW!dOX8{o~+?_T0@aGF~s9KjZWVLvBGatfVt)5!h21tw^>P$HY23shD{2C+4bPp zTc5TUf?vP9V80{y_4}{2e>C{@KNjtI|JSdK=IzVDFZ)UR>EPD~{=MA@etqep{kGuO zUlr_2{;yxX?aK`pfk|58aGNoPnBV7a)`2=0=M0Cd7zuC{{AwMk`Gw}Yo6}7hHksgV z;kJeB{`!1n447UAUcu^q-f;(YNHo{9KZic;&*Ouyy2J2CQFY7|=Q9gV^Mi=?rWxR( zBaCPP4UW;FyhTd6QTeJ^5Gx#&1#!1&(gxy}Dcmt;)E2Xb@qCr&v8k4s*FID5|G{h? 
z85>ZZp-l@eQeHdb>h=7qy|?VIV$As?_HW_~_L}|g@$GIXxE zW&K$QZUQq_?cGf^yp5vy)|>W!U>BP>RW$+oH~3(q9{Q9^E`UgAVw+%{M;jcl&If5V5gvFEb6cP;v>KSOr2)o)KH|Rmha)SdcC|QYrSIo zxKgr?v&hM*%tI!6>v710dM_F@fb~PpslUJ)Xag+aBf|*qST*SKD*4CcCoj7*Mctez zKc-MVb)WhkBc(oI!e+_ZYT{}%o7b&bAEkni$UsfZ083*46Qxq42#w}|Z;bGe%BQ#fSb7=HthkHA8n1GJ& zW^=;TITH}8O#Y7Lz-!rXuzJH@hTGu85C$G*FSo_%BMeQ0UoTNNOdAvEW@WTt-$dZv z%#T~}v6%O0!M+VYJL3PgHgk>QfBCH7dcX>R>Ymh_FBFR@dc;zNGD(AEL3P-Zlj92+ z#N~!2Qt)UM5NkA^tD1bYK3s5owySAmJ$pCN^NpNsJ9-#JVR)sso)PKkp0KeHBWL5`px}11U!P0xLl?&dHvE)ntwD1R5=y z`wj&VHwkH@0=n7sVuz-?;<_BO$8|RdZkQdfXii(gcxBECUQWfR%;rD78uQwyFxKpO zMH5@uV*UJTOzQ41(RHBHE@r**<^0lW%=}Z~nR{sUqGC6Og3$@ldZb{CEKj7kKcP5% zpxQb6%9YtJY;0|-25#7iPOTp)k42C*$Ms|AXIH5vB?S>i1 z;bs#R@4=I;Dm#Ogtcu0Dx2PvxQjiLadd}=Arl2sLBY3c-W>uMIt|LKf?SF;|At1B^ z$xQ$d!c+~%E8O62kSq~QYpdQmgc^uLlsId~vyk?UJ$TRApGG5yP~B<I-R|eI00`WY(zO2t z2mVHP7jnUn^21#{L11SpKZI!pAw~o>K8))2|Ar5DIM*m4O~%q8NkB**fFnz?0m9_& zHhChcsZfAR!J{e?lyj9d9E7NaU7zsRb4Nq)rYtjZt}hjwg2J(F$X8S84B*X@yX#_; zr$T4JcXta0&yA2OB8`=2cojAM?H~5fVBF8Sn-SFU!+k(0Ci)=*PZCU2B3|>+?RVRs zMCB>_sO)cBMTi8?$L>1tYDBC)C9nKbzVefNWfRgez5N}7j2&)w z7O=PCa@u!zB9*FGDbF?Se~J3f*+0uy|2bd1jY(vXT*wnq$*C&!85w*v$z!sA3bhc7 z1jR~9o<|_|XZYOy2pjq+KRzbEe*&M+*+0#{k;8I+rh^<~7dzu- z0L?o10ivS_@6j)dcx(8niVWc~TATHMY{U-+mGFFk57poH-&2774}3W8CO&4i6e~%= zq9XwY{9+A>BVpI9&lDu+g!YEBIt75CV(DR+QE;!G1h004pQw6IApq_ylx8YLf6Jm& zy*~2Luf54=ag*1pytLD3AN&G|ZNQ@4(YAES8g~c;l&!!p1h5e*9XPE2egTu0(B@V| zvw;7w1|8WYg5hG@Aoit_XpG03{l~I!o4x&`+;E7uhH@lgjsJnQ2kFkJv;7r(gq&=C zh|>OAn}SjM8)k_+d{{`zOZzZ9rjas3g4e*c)PRcoi{YVeVoX8Au@Z0ColF{WRQy6u zE)sW)xIkQ+`l>ih&#Tw4kD*egj};V-z<0{QaR4dCDS_$;Ac_ls`YQ;#PG1-mSpEh# zk%(iZT!ygYpL7Dj)()j&~p*A7LaYM`m}wNmgxVbEvUa5xNV z;w+{Z#jUZx^KPJIXV<$5(5&!X0|cMx0D=Td8AO5u5NuRy8v{Ya-+XQY9O7+o`i{(J zE{ zO+ps>KalpMii<^XWaNt!QOxmSgz__;$3|1WA_+tCgJbZEsdA$D-UIg@z~g6Rd~}#n zi|)2T-I3p1z5?eebU8?cD*7C|H(KEmx5rpGKm%qoHzlLef$`TyYdMGraZA76l^~T~ z=MUbgTG&j`_1OLPB+Fg0)R?%|ElAkx5VQ%EgCmr24W(9qqfuPAhEhVdKq~lbFS4}+N*|i_IzjGP zmDyv6ZBgsvSPF0%s9uz;@&+$Dr`9sTDb=z<&?%3m@@>|kW2WSk1S!@k=Xi>mqt4W1 zb7A49y7C3yNXPE<=`#$(p`&c3%orSUvT`1{lvIuN=QmGzs(K6rs@J2b@&FQe;Hk!h z*~ycS$`T;VraZ!)o*e7Wm&J!DcfAfxsk_`>J?WabYv=-FGQwy}l6NSGfsJQk0$`HF z#4+Fm1^GCn8UAtckg;DVX0Q)?0if5?+qW0g>oC{76#);>!B)IF;`IwL5Yi6<69wzx zk1?nm(-ndj2s&AGtP#87;;8D8!DY$|Ttq*QQcq|Ck2$|ogQtA~c_c8L@!FWNklKfq zB;M=I6+hGoHh@a2;YO8~&ZE_}B@_$4n+nsm9xP(RWiSX1m($iGR&iZwNXVzb&IKB7 zpawN)wVzZ)i^J1r1NhBDBRK0>>}R)ruab|K z``)vkFioWIJ^RTnW{Ku}NynB%)|m$XpY>nt>uAh((9_C7AlixWZ4ObK2;Y(>>_5R* z$o1uBLo3uT(d{o(6g_78-?`tnAc<`tiOiR4OAi!EqUwqiB~@5VRQZ3o4iet#iQBK0 zL*X8^&i_r;If0X6NmR)zEvd6Y)C>K~U$ZE_%&pQ0`~e8kRD=~EQXXO`J=8tD*2VRL zF2+sF*$xH1&zQ4MVa^ux)E_2O3H(Q$v8PAsw7-5)1S+rYK>TqC9$msp`+EOUxRekQ z;VxiW#L;QA|L8QX(CN;;p;LKeRc$4I6{FZxTGes*!w_U@V5NO1gpdj9G@=taauW%s ztjK0$`#gV^9$c{+_0ct z8MmeGbt+pi#3&;FG{GH6s{U}BZh4K5AyJ(E%u8VO{@?|@f0iQ-BU=>W2EtLhO&F}` z66U63W3m63{yHVM-`zJ%|4kED`XR1XRaix7;j350{7h>3T9jHMtF|Pu==w{<%2r|B zOnUls|LN(FqNh*xik?=IP3Uf%ktkTxy>84q7UK`-RhJW1g6t42-DZ=H@jtY0Q|bRJ z8T4|aFqf`Ow9_YyDBU-%$awtWh*Wz963vJKufWJ$Q51L?Hs9z3W9VIurli54>oV3a|n+Op*++3~c;<|FQ8wg^hpSD{Nc|)3kpTy}H-k z(}~OLxcE20tP_Q-T5);ZW;7!^I&|-xRB!7?bl+p-K+ul69wiuThK21wlqKHv$R!dsxc0%I7xu+%BIx@M-Vl$1Z%FxX!*_eeK5Al)Xnj zZ;;O$S>Wv)c%BR%VX?&rJ zY4vqheZ5J2ol{?LR$ouz3ln&XUu*2Q;>$jd54D}k&f!nAGpoMx_?of{>c<87;bT6w ze((U$UTn-7Jn9_~9L{-ngU3eZ?yCp`8O9j@>fLs%1M!C4BT2Vod4~^2c4A3#*dTX- zaKne0@#Xs&?JoncIHOXX4BD>J@((vuf`O~$4sg#h&ppNAp4{#nIz;(B*iG5$&}KgG zbZ#@tJD%12o~FgKo#nPjCSgeD_E|#vP5d~=k2mAPatCqBjJ~vNEl%~!sQbWh2IyR| zGJ_*?H|7gDoZO1PhINDfgF$u0fQ2)Qv29T{i(yY&5f?8 
zyvOMX9<&uv?l!$HWAiw}6IWeflYEC0B_C$vlFiiuy==d0Y-^1Pt>6IbsJsf7bl9LT z^kGs;&?65dT}jGOutA@Lz9y>dj-pBy_No?D4iUSG4_{RANg%3NWE1DFtf(RlKveN` z>RyPdUGzi++wNpjqiKV*tYHYSj}tONmLKL{y%S&`jW_I`NGiZ0XW>WWSkxAj6C1t5 z#dVxGtF~6iRt|0#u2mXyICafGYfWwUU?QjV#q4wJEJpGsZeW3ox`g!dTmz@%sBI$Q zBy@jQu?lXlcIU=w{>wPf{hiAz;jfyb25F-If(?holDWJsw%*<=XF*GzBLW7z@eW%w zxzXf_iAPUze*mrq;8}2b>q)!)RLPq$0Q1HE14eJijR7XIA=mxqf?#s=BX*(yHr&IG zuUv)LHIE~8u^oef?4XE;Edc=!>;1YzIJXm;i)x|{9)NI64I5o>r3N#$ctI@i7yHz( zZ-SNBoH`68HA0plyDZT9efqG!2XKp>Y)&2Bd*6QV*+>MhUpm893$OH?-^99izp=36 zp^pJ+=#7Oj3|RiD5F3s$|Jmf>`k8^wf81Mi4o-pi!+67XFv08NbbjtSoL~v0a&V3$ zxTVtJSI$F+js=HIJNr@vwWUz-TF!|dIS!m=2Md+PgClAu5a~u88mU<{w%&w@CZMMg zNzljr1UycX8v;QmxNfFYt?wI>n`m(HRB-{f+RH&qq(TS_oEkC@s*;0wMIcI)h4(Mj4NJK;N3v z8vsQxLQ%kNB*0Q0UyPG+eTgSWgyFDRwRu%LIZ~M!+;i^l71V=3`(O?oB_(#XQaBo8 z>fH4{nHzW&*ovwN*2P}taTbKq%qiGGT^K$14vQ4u@%BucN@|3SyzwOkUSTpF0ZcA% zR>g))>WUMS`#VHFO&vl{pa*OP1~G9vrrsb<4D!XE*3_s$V>2zT zgX%yDdRZkIXZkLnjpn7C1;*T4dM%mI|tH1>ohUh{pKpq4YgdFy-6eyrVlh5)a|C*F-Y8u(L6@rLappNTJZ03bOGiVYp85X4=Nw<}m_ ztW}u7G+L7|D}tY)>t`7+#S;*(chxPQM6cH6vq)Nsyr_@xRu}H_N$k>EJ`4hc{Iw~# zmX|;MD{|$BcXKYHwDyx~#P<6Mt`G3zgZ%gqKVIO+hxzdlete7{ALqv>@nP9-#j7XQ z*=c;an-0qX+;!fpbRG{D?TmcR$Y&Ox$UMl&4-20>{1p*eFWPH=3Jq#H34*l23hGPL z-p*QWBbYYYU1nUiBfhkX%f1{AW75f39T&0T%DORU>;V3E?}0IcPGVZw-TFP!7vF13 zUmZ2I77QbT-pi->os;kw;F=dPs&U?P%p20Gf${2uI!ZG9cVfi#wX#0ep^efL4c0g!~Vc!1)HcYfm?=G8tSR ziVf6mijL!Ml3%!Zx!kn>6pgt9c)$a(aa}G4fiuc+M(^b+7)yayHM5xK&*OGtzN(Jt z#A zh06rdHmaUXG`p9rn*`E7G<(g8SFQtNdl-$a6wN#kbU?D(6hAw-;-PjxGTr8Q(l(cm zWKm~Sr<24bBi#P~n2fHjRt`4)#8|N@>svUzc7k0}PZ>kxY)tDZ(VRH1PFR=eLXm=o zv6ATq*>Rkmp7O6#4Y-!bZO0P11p}^bm)B>32KLYK;{piN&XYQQM;8khr6Acrw6T)(k(vRtSaGR4Bv>XHvn?1=b?78*vz3xtP=kq_{%-X+XE zA8*+GN6DQp;pv6EFz}woPK7yemaaIu1DOnfLsSJa4ooradBI_=7gARFQURA)iObLc z*TIgjiwh>!fC185Lpum^=j{-w#xniJ;qQJ#FQ$>d!X784kq1=qz)-vQDzK4_XhG=v0 zyJo@tU9;p8wekApy2{Oy!x)Ef=^D~U9rd<2$kRA;ZQRKZ`A#stVj|iV=s%3k-&jp_ z-qL1GmhVovK-4`STCMY5Orhtu2Mzh_6s0zM^#Guo%8(;q-4Jj`=&zn zyPNE$qQC9XMS?Wr;HDyu9b8!$?h}1m$mPRpC0cjPDhjyvO$yEbTxOR5K!H{CpW=|W z{eD_`c|IhlsX3UY4dx^v!Lf0~{Y_M{OzGFS8U9nd&NAzMwrc&#fk z+2R8QYM0r`M2AiiEN?(79KA;?uZhT`<*k!B&=o;rOL&bXUbY=1-*3@_YB{Xt||9Vq)Mzzc8>7qQy7&Zxzhhu+w(l zo*rR!N@4ZID}dE`5e!iawv|AtF4XhT?y9&_i#O}0%9f>*+K~eySnP9W<|CVYyaQ&> ztw;5-_$vU~v|nzxdW2s$UTxi(}JXB`+T57!N~S8QVRu=Tt#Vy|^=mxaqrWZKlBldfojqxWdyXirwq z9QX>M5LNwM!f+jIID$k)p(6L7gM6uoJYE=Fvzl`$uthirxc z{#ITK-!XyvfrIdGB<5vj2uYQ!*Iw(&b^v!w%4|Dtj@(0OC1!?=LUUE^4&5YJfk5}- z_&quEU<=)AMUXTC-kl)RBR>2JrDvsO4hg`pd_-KyfK@KuzipL^=LfP*=zv)?Rv+yr zwoxUThZo(*(lG~5@_Ee6o*=fC{DeBr>RJw7=WVC5@>g>Xp8Wz@)bsf8PpIRQ;DkCB z*~IrNdqN#)01R)TM`?CjCc|6HjI1%U$l#5?N>~UXeu01WF2(MP@rK>w$y4m0JLz3> zOC;FMPU@S?d2u_U>H8$Hj;ha0qfkT?pn?tZh$&F*#EtS}xZDSOUKg5A-mrLvol^T&+&}3Lz!az5Kdjgj??)HuUCQ*P??p5(#3_j z@}X1~?g6V7c)OOr8Pi>d9-8j%mR&U64M;|vD>vPZst4>}!f}I5`Td>wpOVk0)od#k=55j;4~W?CZ;$XcB$_ZA|_S(hZ%ZFwcLEFH@1jZ;Keg6W8wZj z4&Po=?^cDU=1OAWqFI;^-I;P4xk?Hr1&rZ%LLbc4O0Ax0`1L$_zlhrwz!XNNVJS8w75Y zgi&X7HBslrHuH$0&achq3t1=`e!_T*gj(?wc=*GlJ~B!~f68+ThjP|zrU6|BhgQ{r zJ~-iRg1)qJE>p_^r*O=Nq!}{RtmxoPeToQlp7SKvaS2|WA0Il+6F#8WSgKj?=z8F& zplS#s^WY88r+gQMc0+8BPR8^V+}i9G%W=pum~9=IbocJN9OA=&U2@c$bCNphkVw3( z{;*0jR*nZdXp>JJD@nP`SNJOQO*EoTGM7{SY*H=_Qz1k;hAlbX(xX^30_YNhAO{{C7_Lky`>=?ETLBA#6_k%6*5mq7PvO`n-oOV6Qqp8C#2} zR&;!#2=-bAom|4vWT48_5fu^?UxMe7@e*Z7fl!K;KGD8+%^FDj>HZ_}*fNp$Eq8js zQvzkIN?bd{kJQ*0c9Ev0-ZXh+aw2_X^324;Cy%D5kG}D-$?2o_zY@iB)dm&BVrsUA zgrrQ-nyc*hF#iyb|1xDrUwaB8=;u}wLHD9bd-60-Dz4Gz0lyc;k0(sjsl6U|@mwhRic|MFh}OMtYC9rR+51ulWbKg>{#zZo z=t>Fa*gYluNoCZC65e~*8uZ->WKqp9q28&lzFm?r&K)b1nZ+I`8Bz5EheM{a#ftb5 
z-XaG*)~ss9Dby>qg{MaI^?KDgw159xp`LHdj6((4--6utMv?2mF1s~R@Vg!GL-~?< zK$KOp%2_MoPk#CGCSbCv<-TMr`hV^)$dw?m94;gNIszXqIz{bUEOjvj*y%umc z#?T2+JwiNv&w3Q`qOH5vL;n#|XH1A~EJJw_Gkf-kmc%{mzV(4>d%H1zU=PFwV;-9r zjXXz7J6K*4Rq_{)$xYA30DW>Um{#xmm{F*sihiTvgGs@Iy7*>B=&e`inxjcVNkeux%C@9 zsJR+Llc|%L8l1uAoffzeLwhf3aGlKYY#ErKG)=`H0vCoV_JzH&MX$^djAcC*hkkFo z0!P_!IFxO!Gw?a8=>L|SuoJ8WG^QB@{5^=DgcH7jB}qM6$yvx;Cv??b0J`YK(T=68 zBDcK&n+gZ3re-XJI13eA838A~Ivhs5fJOHVTIl=eLkt10x@Q>3@?&8eRr(U>x$-ig zVHFpPe84ea<0B3Z;;)ibg60=l%PK5FAFEUtW_;-JJP&Y(-vfvPM9ay})@l)3 zDri^YA}`#GQ<%%wQ)L{|t||y4sa0|fxd|wR8+O&PwoGY}kUTmbV|Ho;U0(9DG0VRU~2gA z6Dav4NT6hqOeRrM51kVcPAo=s`^MDkKr{<;Cl|2;n8 z-l%>*$A-hIp9$I)s^9*@mFfa6#DfSO&Fj!47o6H$s^eY z_7!5r$cig3Xk#o@nc>b=QMr+ASY1#$I-2o`M^tggsbmYV(r}G+`2G~}UpuK$lGDf; zO>QH5rIVZmOHZfWctxVt@gy)Pk+`Q(R5)mP5A!iy4XDNCcK# zN+@@@LI~sB$8a|EN_<-~t!iVYSa6^-<_c^YH~*lf!-v8FB`UrjMyP`ulG#E5_Kyx! zc%##ibENMQph)=?dsX_ym5e1BidUDAo^901=n_ByOp$EpRHR4S%1U+}dnbfK2)a0Y5Pfi}iv&pH$ zCm%a9IrT{DVZ1kW3M$Sd6dg2r_Ed@;>ZT@-;=N<3M~_Y)K8}_r9-cfodG?8YsbiC8 zr`YB(G&+$wJu!WD^6+CPC#F-UADcdX>daAee+11=O->z~MlVMnJvw!E9KGUM>gb#B zBX#EZ#L1J=)x={M{j`kl@Tt>JOiw;?{A}v@sgp;J;^D(bF){}5RacnQ;gb`S$Z0w< z@#w@O9Nl#46b3vk)%55dKYmmmp}z_Iclhk&sVPq5@TsY@)A+d$vz$Kbzxw#(nWOts z6VsDt2#{mbr_emXi8lzdDZV##RJB4-%c%VoK@|e*F-+PY%aNlKC(#y`XUeE-V*R<_ z?Xv{%`%~Df*b>C{KxbVg|IX6LZ3Z3Oe{- z#OyN-Ks%5=iTy8Z;+p@Zv;RfFd{z5j28Lm}uE&S(f8mqB|H2}h7=C5_FQfsO(epj> zzwDgm&IQSl@^&J47Pc~TTU9Vrmi>f_5anL})jO}uzIem#R8p@D9s6lcGwn@t2?xy= z&hT3o4GJHzWCXkY};iqo$$^w6$ejt{fN*RT-U_%6WHUClYgb^MDenhNu+wsO<{|7N9>f0PLivK{w$O($W)Zbj zi0o8~m*h|u?&(G|KxRa1ODdQOTW?M-u*U!g-i*%e{Wm2X3aBAt+#H%549OS zU~Bs7-Wb(KlP~5T&s$|s&BX?Uj&c=x1WyFj$Pc`)7Sw(o@;mLmk?k4q$besnj}d7z zWDLqHn7>mH3Pe| zfh}OZstxRqlXyN#)}*gywD}}3uvugi@2{+ZO&Wmmf4N5n_D$22mfFC!vQ(oXU3n3q z$>2NGGg#1X5K2O#zsA3MXKsHv-mv?oZp>}Rv$ti|X+7&D%*=TE)3hiV|r@5R@)H%9a~*>Kp1HnFIM5&aVNNf^2EFVQ&|DDh|gM+xnASP~^V zJ4+16cdlwo@bCIRK^;nsO)#SBpZnIz{VFdVZ;H=aeZPmeUGCfOvE!aNSo~hvTTR;2 z679IN&&6-oAN?}CFrh9CA%1P4;Q{Q@ljbkTi*2D5b5W-5--EaG*^d)QRvlc8aha%j zy$T)`ZfRP5KaT3vVD4MC_EZZFl9>Xq?^a741ABzIy9bwnvEtAPxQ4+r5;YJ;#+w0) zu90ymZ%+W18`{7nijM#8cxB$=m2h;{sCYsFxmxhEBg2snLOObYPl$-27L;$MSBh&0 z1v6HC-hx+>@A~jndc7}Klu_V$Ahoo?rYxk6d8$0(4onV}tgu2IwpZvlh)bDbL()#! 
zM=Zn;@hpAiB{>=7@hGu|M4rf|_j~$JK>8BW7y&K+n5}Imd7GfW_s%itCRS>2h&S|h zg2BOB_r@Ex1L{b%FnO92erWf!!c1>Bq2tAs1Men11=_Ba^8`oyy108~M66i{Q6v9y zo_6*ygJ*^}K4&?&#@!1|NAUZj-3$?E=8p%1B(e>`jPK1bjYXY|!@k#ctiHy~T;&#v zdVeVXsKO#aB##aWxO~9vwDL(7lhu0>dCGA_hIZMO0iwtI52Cx21c-qsDjDC}Dwceq zT$u-M<}wSoU|V1z(OeCv5#WtHrH{sE{5T&s&8nk&0~tCJ!Qg&(7M8dcVBEFDUEeW` z7jTrj+dw-iZzm^s^fPxRtDuQTlBp37d?N&^+Vw1{)5OYFJjP_RZ*12t$lhxiB;T&> z7RIqmPFV`030w)57}sgrw|yeV;6-WWxY_c#)*jX2M~F5?>M${%<|{x z$X!p2-BlX9D>o832eN0ahEeccy+Fa`D}DOJt{4QPNcfIP>TEzW?r#?D4LMJW6$VO} zJf|PFBY`WQIdPP`Wq{Y3Md&Rtar7SD8|4Y_TK&<4h+=eaT zF@q%`T`h4ulk>V3&+Ht}l(oN_noHxY6MJWiKG$fUUQdrq0 zjb7RFg-9S!ZlCT^u=D01*jcj>fWzk0*3r&R6%BRv4+$rs;NRt6y$g5#$9Ti;<2?v> zdh=dpyz|RYczYA?^g8w;p8jw8Nx~sduK=zQrWIo+DbCGd&X#t8iw`DjRz@vJ9Ge5H zT#4g|7XH$oj)8sOk0VKM?9IPq!{NB8iHD*11BCfR&ol+O48Z~4LAbG%hQ}p;TPl5EZZ%6L) zDB*z=%}(!aUK`FsgQ6CwE+4)7@uLr)!G^{3W78+^4q2qB`}X?{!cjxpVJsMfF+i`2 zIcx(crzClqH&1x4o8uJ~Sf;XISZ3ECwj79sji&3i_1$g{2wSJ$#_&F3GM%de@8cP` zKJErsA4VjWiO-(>L_Cmd5QW!TqM?C!cK*pIO62oZh6Jab|FfIo#fi#=-Xo= zGUz$?FjBBr$p@8Cqh$rUGPcB++fm@*(c4w(`C6qhmzQQjWCzJKKIETzivta0l)>CN zPH$!&&5S{4;p8rzsVp;5TVnJisx_SZ2Ak2Ug8Vp#kPl9^966`$T3;CqLX4=g;#9q%^0tL?< zZ-1K{ijtQ|3}f>mm*q~Y^El*UQbfGAN@pvLnq17|@U#_mDGPLK??8NYiK_=~eJIzx zNrzxY2Hp~fpl^M}E>YO1hVP9;ZC+l_t2N9Q%LaFsakRi@xx4H&$)IQ; zW~bRlW!8$0@Om>LAhzc^B@se4QK7(#4!JYOMNjR zRA!8T2Bf6g(1_%&-H2D__gKC$iy5v#hz&7{^AJBF2rv@*GwPW+cVO9AJK3b7tPd-gOeS=rwT!RK8NcJIBfS2U;J z6WbGXtF9>rfz=Va+y{xt*c1t06SzKqT*AV9OcPX_!w9{Hh+7J{Hzh)jFSWrNNbs-X zYt?H~(>rKQtY|@M+k8^f*c_#`76{!1u@MiKPi$L5#Fj1MqVw?e*wI1*!oS^H2nS^L zwRpoJGTYOF%zj1`IgR&n*gcS65tTyj@|Y$bT}Sb7NF^aAMTA0wKJ9?~_^Q_?&eFH! znuS~i2SYG9lmb|JdKEd-g#$qdX*=6ME+Bk6xQapOvD&EN&ZQW6y;KHmEc&7{nN%kgw{3IFEC;0K3{P+!i{4GEJ3qP)Bl^ys9GFJHNqP(^!uPjRS zMX4IfVUfngERn{=Ad<#KUz5f~#ISLjbbVzrTu5$U!f)%5lX3GI>`Oy-H)8}Q2R2F! 
zHhP4R5Q=|*fA!AAI1z8yeI#iPiz5?Db7D()7~dcQ3_4i`SF-xhfbhauE)JZEIN_b1 zMVduSgty9byl06w^h7P8u39o+d?pTx-sn%$Y&e{$VFDef6-i`p4~Ej-oy-D!IC-4t zUKvqadI=Wh#NxhCoB%e0eMPwi^Hi@y4JcqJVVc2^`<$L0Kt*2z5Q9o2%eY!n}; z_~7^-9Dqoeh&QcJ<^gSTScZ>2)c?bAYkJmj4xd~TUpG>XLrs(#IJN=jQUN~;#SBb_ zinj_96808BtQ2Dl^#7@Wr9V_v?oMb%m7scn^?u4g+U}2`%fgB6$&ni z4&xJwIWW%*SGD0hKpgm2AZOLz6 zWbzO>Fa_S<2P3ScFFk~L%?DPK*KBVC*C?-fZA)tPmOxyd- zx(_E^R-Y&7JCL|;FYI!?=Ghz5=Rn{{i}?WxCU48lZ&PEt!TnC-psaAii< zpW<~tP*h83nDr1lsLb4MBK{TVKdjySKLA+wmefqf$+ta5HCPj!C?8vsh<0=CT}W3Y zeJGSxp%FQJS8@iZ*?2U7qcunpjIkggHluDb7VBz+w$dTl5DW#}Cj?x@rf(5r`S{S` zN|{&ZTEzv#LEAOO#qZD(idC3ZP^p@6;K25>mX#9gRB`hd@Cqj%2&%Oh4l7D3#yoD` z6-NW+ttp(j`rf-knX)sF5wo%h34s`qc@{V4br^sF4zDN1rJ^kwhDM+iNkZkGL@* z(?X2Kqf%Kc@Z>WT;r8u_%Qki>*k=uSVc^VSK2~7^*dEws6@yd~D?Q z=s6X?*ycTdSza__hfZ-j6pF}UVuko>^_tV_Q*cbYXu)aS+r5>uaKW%=vS3+N{%y8W ztMT$%Ws#Uh%<`-l{f?M1>W#P^(xFkVGmBnfryvJ^w)$KTw*sWMZq;UYG*g9 zh|8Cz{lQq%I~k@ z&-@WX-Pz_@U=u3DtTzrRe)VgZ_sV#rrb8+<5LC}D-TyG$E!YjxP&ca?y??#S-rv4- z?}JA#XPC_47#)AB%Z`6+>5g~NHN&WsGtMLN#7Hp4@RwbV;hjs5VejKs5k{!^Be};; z_DvrcKQMmp0e>razQVrMurUS%&AZ zSd4eRd1rS7@s4G6UawVh4IxKk<~MfP`HwE$`M^o|r*YDN(eGVd_AA%6cIHCuFI-iO z=wO#akc(|P4`Io>Xw*m^>~bVeFTDs$pA)5qaz$R6a*KxwsySatqHmz_&)V&~3V zBp0f_XS?iMVwj!#?ht%bV{p0~L&F{7xusEAO@XVVCWW6Z5{%Nw7Lh`kU<+d>54MGo zk{#N@)5x1_VK8JAwop4sS6!P-3R3qw5rfs?XG9>i6P>H6(nSed73Ad%8}9mgB^y#E z{&0O8W#UiT6?_>8@9uz{sUr6zUCq=obG1x04|S!bQu5cE+jJm%j5}&8AH;xHrmeVx zauyg)_G&)oZiK_4QNyMsYE3SJD%pIcMw_{rKcdx|A7F55`CS5MCc!q#_YuB-OeXN& zq)mJqpjJ)%c+w{R08M!M*~*vn{pdEq;&o8%rqC>jrl?@XO;2SuzuTrFj#Kzv`H)1JOS`2SyCw%7f^SomYMha zo<%>JgFS*jl>>c(e}(fRewY-%#7M=D6o8}dq)zbC!wTr)JKc-|EKLMu+ZApnr4rM{ zl6KtKuHJAq&TA1E$h^OgiFig5iA3-c$4eyTbKw~N%Q)Soc;8Pa1!;g@pOz-t@;VeEDP!HZW z5LXX=lFvfw!MZ>_us_AJ9;yhRW_wf`JOxC0h$#(4R8vZWJ{soNl7c;;Ck6Jd=;@!T zmFqR*+&Ni>XB|;g*?g3liE_~&pxa(pEiwFdcimA?U5}ybvAXIyCbj-DDewcF|MR3x z1k6kUKf(HGW+rZocFe5j`bH<$@~-Q9v!0yMQv7N=`sz|w8&29puy#X9o6u_~j;9r` zU1B-Z-ZB3`kCu)TT59RukQ8VE+1)Emv=ue&MR-y*8)%Qbe`vql9-1@#`%m=?cJ4N? zG2QK$bQ@z~Z|)wJk0)&+AcM1NIeINW_MANVsDL{)%H!I(cl?plkL@)y@n`!6{M~mH zizNa3nDmZZj=5?hu9!cQ6n+6ZKb^FR=n^E9BFg)qh$=x7!6;snbRwD`iu--V{4gm| zgB`XB6@IZtSjG7)#SXua6lMYbd_HLt!ODFuX%l+oBwVs;E7vm4`WHP~G)`ryMfKl)U48)K{A>K>ZEpR|d95WcAvq}Miji{p~RaQ}zC!F_uG zcYTs`4DkOmDbxZq{_jbf=sVz}XX=Kbp4!tJHXq{^?r;Eg9`qXn`EBUAOTGH$q)qf4 z^#Fxz4 z6-)f2np@i>-M94!oj9GPsR6UvU)FbR(3_aS8T6OHfPwQk|wReK!dI|}| zXPI>SCVrgb$D8qC$uL>?ekRv^fFB>^$A|dw0zW>?kB{)F)6IosFkMxk?F#QOKrPV1#>JmN0NP z*D95I+F2;gREkae$1x*!;~DkK-+ID*7*p;Rr!a%#!saGsDwix|UVFbEwu+T`{N8|o zIWoGZ+|4;Fi-VQbw!kTO0QU{b{t$icgS)NZOk3ePi|(d$Hjh<7N_g|BraPR@lq==K z!IQ>}J5ZUiaXJI>B!Bz_cHC@I*PJ<4UFI>MfBNNv*+N|n0n_9w_M722e5Pqn$ydW& zm#tR$s{Q&KfGF?99PLB+%bd0S03J=*5AxI9Br`l!J5|Ljx7ICeiN)n=r!m!%Lx6Zs zCTer|ZBNLn56dSr$fxYX_>5y zwF4z=uVgNl2l4edkvlIC(=~?k87GaqvjoiQT@H6qM!P73U6ipd%1{?&q>D1pMH%O! 
z40BONxhR8Nlrb*K5Eo^Hi!#8)hOBzHd;6k{Q(?n89vVq_siEnIPF)Dy-HfZI=PI&g zfWy?4+>OP|j8&9@yMuEUuJFiNmz(yx(Qf{TJM2{Rh4STb@uyoLtMpl%2HwcTy$V&>_EbX`}lP-6dMfWjR>vp7hy*mO4_OAf8qo9}Ge@54J%Zoi*@ zx%hZ*^F7U(_coEpSjnkzVAKm^{x9rDe-70~Bk#If9+g!}wQs!{YJH<)WolWRIQSEI z*sjUVXQWcOKiru|Rw zw*7)MfnOY=9w&aWW!?OHnt}}O2IS0u@SY*=ZY-2ATBqr5)~kkV*#PwI$FPNj%;&Lk z;o=ds{U2Mh>|e&(kcX0Ho_B}q^OZ4dNU1yH)$$w`!eQCx(c|-AJh`IYhw}4y;0_S*aCpkwK=ImSLp1KMoYMi3}mFIW9P@HJiq{gmvpOQPsU3Qj&`S z5eURc`+FSS8_WXs&M9!F?ryWJl&Mz1l38+#St8A)L*+M`<=D3!>N4`O8d0D>xO>c( T!Heq)RW+h`kvG`J%=rHg*rgEV literal 98168 zcmeHw3!Ge6d7t%Ot+aYeeiAN=jaRm2R~RUYu)%9-WiMK-H>R?(WRh?96!X zj8;M1G zxsP-2%L^uBdBth<5#yghCuUnpG}wo8`_)pBXh zF4Y>|t_SLcVs0$*=)H}DZ);p_40#(el}cmM>(7<5wW@9B3e|@9x{-W&&K}7Wv-Ze* zxq2yb>sGTP^O>3A2;Q%=@=UEzE{#mHem-LzZo}J*_oS*l~yH+k1(1%RDmM;S$JK`lfYW7^E zn5o%rx{|5o*+73fc-F{xyX-63Vm)WGxMo*XANupPxnjDOx92dxU20tEYAqK$l5&?o z%?9;uNvj8_bcbE3Pp7l4o1Q5YF)N12DcZ`L0I(=KL-B`|*dj zThDkpGSN3+Y^+t@yfJ@MevEH!0Rj3?*;7nAN9st$bmwoT~$f zgiBDcr&i9U%hM$2G(M^p!lD-^i_&f$P*U|1F>~d)Ou?U|7vbwUpmVuaD%b3E)t&({ zl(Kf(tz^K67FAESOBISAxc=0w^+haE0_tF@v`CJng6ehMDRD z@JJx*t*?Msga)>kAXNY$5lfB9Nn+zJ^(?}muc8D6+vhT+3W&%}*Q=mWRYPB?P^%QO zmlhg}^3o1{pDxUn%HS`Lsc-k0k1>(6KyqEUS#d3Y;`VAL2F8WI(mA^V9)5D2w3ojp z{|eRcPB6H98ZSU9kkb6Gcgu78aRAY0gSxBR{4YS*K5Y7HMK=uwQFu zbNx~F*Il+U3v}`yZF2$B+T(SgWdP|tk!R_Q5DqxvRz`QtL3^4Mpt58mG=I?4^+Mm|@3N#0NrG5-hoKj2q;!e{9SZAe+aDl61LZD>WF zZIfqORD=1;L)9aH$c_BN>amibXuVi)^LDl2zpf=~pbq@{Nu5yUvekkx zM#4}&P$f_Sz=x*mHRxr2$!_^(zFMH}KzRWH+7HF8@C07Y6jg(HEB_MBS4FQ7r1P~b zRj?;g5WzgfBUPDTp0}4Oq-7aOu;!(^Den?952lIESKL+zJRqyYV_rYDO8FPI5Rn<@h()CIX+AE~5s4&uu(i>gi z<8%h34RIuo)khG-fNr^zDWXcEfM2{<>QsE6Gg-(Avyx9$M?wV-XvXLHd-M0Hx|Q}C z#R2(F!?02;)HHG|s`pg^^|`dR6I6{_RTNxP1ymSybKU{zV)#7JgEtP>pwp_V)i~G{ zV1CK9Gu3RK`d*|%BrE@0JvFLBY%Erws*?_d>Zk7PO{zcCNmM)dc8=oH?{I)actOcc z{iVVLk^p8mm;$9Im+Q4ko!XGEtZfoyO%G3@HpD-a8mJ{uvnf(!aB6~;D5RwGt%UW% zmi|~Zp!*u|R1E?>QUO?$UrXJpS$~vSh-*;oaS`KsR;VE76kkEN!bwv@rAVPF;7`PD z0y>%uVlnUwyHxh|ul)Dah*Ya0i=oQVxT*RwRjPeun!ExJHCjRKsO257F|p;m*W_!p zihFcqB%3P@J8sS{7A{wZOLlFfRGAym@81845!-#$2qq#mH&;w$*)hDVAttH=YmBpz z9ez`q7gFx7fAbKmhBqH<*sA-%$GL(FX}ci8dHtK;#FrlPUkWM$Hc;Epv*CYx4^+Ko z&tDOL?pqQXHGmXvq7dHYKc~q;!uJcd`vrB%w?*~#9{(Yva5*hX1>P6QcS}$da*$f4 ziloqo>BKd&f?Vj!)b_di?m**x9qMZK#&_ho36@ z3s!h$4i^2y=;VXvM<2v@6BG9r3&Z8=tpCk+8tzv>DYKMDUSA~#Wfo6}nHc&`RqQi& z(T(zN@%rfZ80Krf2O;4V*bD`#4tZ09)gx8)tEx0Jmq)wQBe-3FnZhgq1u3~zm5>Fg zzvf^sf#nzLb0q*e+Bd#)19`l42@6z$7)5qw2dLmF+9>WzKsjRd2PRvVGk z>q}+bt-b0)DI@EL)=LFIzT2V7U@VInM2;lS52)u6p%JJgQodW2i#I^qP9%iB>2moJ z9zs`uccxgbR?G7)5MI97roRzQ%lR?*yVP^tKPAp1DS2XBM#q)^3tgl+5yYogjpI*_(+4xd-;$KltvO73OTn1Ap!#C3M13aX+~!@lmFfh1No`Al2Ozwxml! 
zfOzZQ@)94%Zwue6f{MVB=ODKI9z1w|^)sUNt4gg=FWO4b`WlThNTdS9>D9x1V z_z*v)PvxpVFfDFz{v2<6FFsweEsW5%A}Xm>p0NUV;=rkbo3)D&ivYPB(*7R5i6*4J z%P&h(j$hK8P=kJlK=Jx~--X7a{eBfC49~7*3PliS-G|IgerdK^hOZ!ScLej!CTc!p zvw5n5-j=)WOa0*B-4`{#9*|c`_B;WD;*IqZya|ez@0Bw8Z32xVFkaa+sao6AhNhwH zq$_Zc!aGr`1;+BuY`Tz3!;30fKbixQ$h&*!BZYi#R<hdv z%5XsPS9u0~2hRZlZlU}ulDTiCjZ-L59rOC=V1V}%X7PY_%Ys+HY=^6&mal>~)8f(t zkCHCkn5|Tl#+vlB$P2sWg{H&V$D%d^LMgPK`Yc*3UxLZzV>&-m1%eaw!24x4!+Tq( zq~WeBW$43APiL~1;39PMctiU0lF9-++@&-HO1<>PQeiK!PBQsDVklCO3@4i$r>5w@-b%`ROa-fM9v>o2zJ z0#QwXmnGQNmwkIHT9z~+9H<8HwUTDtY5}B7kyfuydjm@6mjxpb)5&$-UgL@4i5RT6 z8CoLz)=&`kdpq<3TOiT4oY6~V{t{chFTa(`0$v|rrTp;khPtMkt3&Jy4R0^NH6wJU zNLsK1Ow_)TA#VlS-BYC}f4%^>YWQ$_wO*npJFV8`cFw>;b<>#=G$*``uXYsR+AV-J zJ)s-AMGE|S>}(yRNzXRE0b?$dz^WmSRj0PO1vV~qHN0CRUxKD>aqR}5ZYe?`hwa~Z zraqm2=qiS&7wLwTG4_Jhk301VA-NZh~F45ET}b7middb!G~CTJ=y2& zi+!TthWBrd6*!s88RZvaH}=L#X<735{@8QH;u|M7B--a9A3lh8%dO@3c(bafaq|1I z6sNf_rwJ&fd_j#xZ;$aD&IS16vGk+|2z$Wm16=5qqxL)G?GOVI@?5V#c+GpDa`&6Gf++UnM)&8NG4W!ANeGs+dR2rp&^Yi458L1p(22wwt% zLj?hi!GtAO%whO@qkeyXnUR`OenbB9qBr1sZ7_TAj^-csw#x@;e|4em^?_!n4$DM2 zHHhm6N57K!#CPueyVu8vAcP6Z*h}wdsIF}m9k$kR{*cvg z`K#z_2D9cdIXCiFkbAkAjK+osu!^EH$L1)Q(r7fR>9Q$K&Ee!i)G z{#<_Q&R^iy`3^5K(HswYF3q3o30#*AK6~Zk_>UY1k<B)Y}8l2BuqTW(R%miEkgGa4F8v9ThtAmHka`2`SUqH~4 zEk!w-YwVZl*GiO$D#_a9kKu*PTv65MN3^&s3~r!=sN!OAPSz-kC}#iALoQhs(|IiL z@)Hr?a>G6Z#T6_mqWH++BZu+$s`QTwId%yanAFgDg}R}jx_lV|uuAX7Yz4KZj>KQM zOt-X+z%?jf3D;f#JeOqBh|>&f`rxf zUo~juI!dhoN8`9~9i@b9fl`WvD!%q2a8P%8CA`wx3LoiF6`Y2(E6|@^y^Lk7P0#`) zUhRwy;X!O@5Ql~ZH|q6TWZp`)9TPQuE%kE>E-z~_Xb~>k)Dp8;NT%X*;a4}^aKl{*!D1C$fdUm!E)h_9gW`6` zf~N+mTiJpjvaNbW3fu*7?m4Sak~_$dQYur=70QDaS^02(zfr#uKB58+CS;NYtJ~s2 zv1&!!y-*R+y5ES)R-sO!gVYh(mjYb4sud;2{LYJps&!IKq*Nmw8EIVJ#j{M!lE@_~ z)(YydMLkp7TdXcB7*$h&FdS^#oQ?=3G|^BtQ?jN3Hv&eq+8?ooFtQ(?=0pM}!{`bbi;_6emZ$9_w7Dv_Q3&t|-xZ^?*k4nB%kRjKBx-NMAF49n&^g z6>1Wq^~Z`E6;%qTH0y3$p=sY+{kEmmAAw|x&W_JmB`^qfm&4XQreT+n&#ht^K1LdZ zpa#`xwZ?l0cd!v7!)FD!XCN0xJp=D4yk{H`Sc<80Ef+iWR~;iK8g8nbEw_DsYz3NK zr`-s2InCbd*aV?sp-lE3cadx*YNugo%$e_bkHt+3e>`2006}--BKXa{Y&K|B%;?-V zp8Ak!K7He-ve_hNrYiTw$*1n<+zP4(Bai{f&4P^xfF;wNg4 zXCpJzE72WHR2)5~d*67_wjhaPAc>5Zb4z~}O5*B@6m_bwn5dHHJoMK6aF?3roXm4C zP$jRlr1lCCSHxDUe+6?E$CueP>Os%}L7K9#0xaxA7)lp)Prr6?%bB_a^h5CIGG;o^`=iCucm}Tb(8g1~X6fO*f-#GU{q3w+y0ZV&>kT^`4u(QgO4d3pZPpqu{GjMZ%}_vjlf2 zsd~e0R?BMw42k3PN1q414>~XC{R`}I6xresHxQ26ZHJ2$tAx4f*!Um4$Mjb#O#g1r zF#Y#UT9mewS#lNT^0is* z3=-Ha^@fY+MP`bnZs0-nzTmae2KiKH5z# zuj@27VbrJNU5{@j%$Sk?$>Jtk0rFZ($uW zuVT*)HngjhrPxR2KT$hdocHjx-P`~p`*CKFkQLF|*zmUb4JvtM5U|eODwEz;WPd|A zJ5yWXh6$RmlW~s8Ot+~v)V46BYt^^1AK4pV#zRJ_aoZgB3t=yHjl0+P_=y1`h1gX% zz>Q{1d#CoJ;mHoQPn`Q8)vi$QR`tYBPoTD1`kQekxfz}t%fkoTt2V>a{hey-osVKb z*s!)6X%+w=Y@@5Ye&Hr>lk8W=uy*JqhIj*esOC6o?zck%Dr5tF+W9al!4A6H%uRJ^ zWP2@F(+Gq@2qSXDY-VS;w|i%+6iD8SVkNOEoS!0X>`?V45-54MnFXjR+`~o}vu(uakDHug>Y)YHV_hXph7D)K6-iz0r1}@$}u0Dqi`?lUP2?)spY^;@S z&)Bf*?UW~yBnRPPbK#>3QYGceX+#I(E##j?zBuMq3%`_UM)s%61-GDZY$q~jsEyTX zRR$j--K;`q!FO*vI?t4>YBw!rp4&gM!6ZmT=lm@C{j|3Y8`6U9h(IZ(96>G++3K!R zf;3L;y3_edygcbV%klmM$Ge|8y=&!nc!wKZn!B9&$sZQ`rCPuLhA}H zuD6wsnYV#2Nx`BkyQpx-H)J0i^7jTzK{m~yzJaXXqRg$V(!v0D>H6=<=DXd|C+fY& zutC>dn42yagXF35>b}^+;O(tOja&Uz<)b}D{oof!u>maFAFoT7+~Do$izLxA!;OBH737SKMi3hEs;1`!vRct(Qh24nv#rD`NMz&k;-&GHlQdAo!IEN`1u;GGSdNeLu9GFhE8?(D%90%spD z2Y9&&7u)MYE=`@Kb3Hb{W>f-9Y?YlYmZ!0Ed~?2#!{%oEHJ~dD9`vc4I@k<;64W94 zHnF|5h>S>HKX`LEZawdt*s56h|CjR~-EO!ZMJ<&iZ>OG@)I2u1BDZ5B|k~ zP{}6f+(F27(OiBq`$C!Kf7l^UP3 zh(9$;5WUXvr0hrK`eSl^w_KmW)p-vuKaR`EbCWn2}Qe?{&;&ijAJi)aeYui{S(_1EyzL?duM 
z!8d=MmzdypKFJR$@txmb@o(}Hlkm=O;o+hZgV^>&&!F}!??K4=ffwV$Yqg3oOhiss`5WnKmQy*XyT95&%abZf1-ZA ztbV?te!hku4B+4J=LYBN_;LOnE=s^T-^8D&=g-y8U*Kob`Ac>4Ex8FWAEN3B;$;^^ z)n2-J!~L$Zzt}YJo4#QPuty0Q>D*cV)ja|BhGfOw$xa1WEDKFcj>R1Bo3}Eu7|rcrYA+R|q}Bc^+3duvTu3JwZp>yr3G~&dBjFbCSymhsOXl=8*#&>(95?ew5HWt4;uY35cX2&BdTN53Z;&C! z%3z0&+H==xvtsAf7=XFld%)NB-u22zC&!29qiC22g4`bd}wxid%o2w&-cuC>1n8yxY zW)o;QBe-p{Yd0hfy=ymt0n0Z&Hl5(+0+XTWX9hZdpu6ZCnjMW~#ZEZDeMvgMunvhi zfmGOqOm3-k_?7d}{u9_-=bI^qtb*FFD|nr8kuPKLA>cF{SSZ(DH>kGRk#6P-#bUss zv9GNuJq^lk4W#g~3X-9v06IZ_wYf^|P`{*JL#D;z0*-MJe;=t3!UD$WJg7<%bqtPM znK|y=vj@3xSKWs68Y7hpyVnFH@S;b70-t>=u+Yoj6fpGjQ{=4zG^jfO4yyxr{6`^x zP!Nj&`ne>Ybz4yFR~*2^=q8rUCgI00oQ&)?*ugKPm~qeihx^Bs2BEo!m6^uD9>GRj z-3inW=FnDBV#|1l5N~$Sw zFFgFb0_JT2>#Vac z5Q74-r!_Td&{)?%R>U@xz>km`s9pql%Fz@n(F7@uDLHYLAw;oQM(ju%t<%J0?~m4a zY`{gfxGX)FLaT%E8EEv?-lNe=nxj#|Pq#@-gQPU`A9pOxAOI4L?FAa7q~;V=c!x`XnNfsAE?lCG&Mxq0`n?DB8;l*;P2?e3!)fZC-wpmtW`QuXyby#E<{YxJ2oAtuRo3)Eze= zsLF2SU&46hFFJ$0(N2A~k~ll!soey;T)sJU=OcCzcmW|^wuq5;x=3kwc=*U+^PIPr zn7)#Ov9G@I)U&O=GX(f>a^S1c14E!o-C;`MX1^GQ6%JDJanaDk9_L0WgL#5%XA zUOD3g$w^*L@p77%hj}^6%XwZN!NqoN!v`lfI4{DFxAmAraMhSY_51K((Rs04ExEn~ zS1i;F%FRo0W%K~nUj21k=Z&Z^vHTj&%~nMc_wuclYm-B$cbO}$Z84$QitEeZ3MK`l zYULD9Mo77-!}#yW;grFg6Z+Pj`a4n%ziUZ3ZFR6FtRRNC%ctc%r>9#*r>|;?MQS5 z(&Ym}$dY;mHJI|)c$ulP^4|;M6hBKTAFWJ9s`wo+_fB}pkuC+oA#bB}2+7%T2!Xc+ z*AzQww}~;xe^1fP(`c?W-873)@EBDE)NY9O;%${XIl$QY04nov4EPKqs*o$5oj{Nq z4u`&iV<{24gt!V8Gk_84_xY)c^FrKK01Zv+?<5=~)FPzyn^m9RzT!UXI>U9cM4fN# zw9art#SgH@Ch~_k%Ix6rUA7t#v3-6CMEp(GtP$~D_&XwpoM_pPv?)_K=eK3z+-H(K zUlQk>_v52ZFzz4WyRKo}AFvb4#yIC=e7!FKLK{@OMWNbnueh^qP)*nQ$xiEB1*#1w zo~O|8FM{GHc~icgxKh=hx_I3LQj>Nt=YRN>zlZ< z_JCdT5YQCX#8O&Mi7yNJ??iQ(RVY%>Fjg|$Fz$_`(?jO|hwE`~lTIS;O)%hv?aay( z$!%Wl*n-O-E};vHg^N-UabC&Gck>Ir?2ss~t;>p*29%g5iN#$AirX=6o=@hB9T6Y# zLc_>7MRQ^V4gI&f8ne$@1MPm$+_tF)~{hRWzyg{de<-uKr-2;?(A> zT_^lL45@g_jcnPpb3WXLl$`$t`5Cd_z0QsU`-L`5tYm|RR!zg>z}RUE2LKAKq7Nn^?*#2M^YXk$P*Za-O&g3hLg-@sN}*W5nb``? 
z>f8?FU^{N8WB@iou^2CfMK)HZj}C#u7#yvJ14$riSfjn%Rg#Ubq6RE(ir>f)7U%Ie02pO zMA7RACCq`hgK3xOQF|%=i9n(QV6K85-*}GlDym7u1s16wC{Jo@dMCt138)b@Lsig* zupaGMn?54@(ufV2v-zDiXq=#s9VTG>+7+2>4}b!-%WUL(Z5ruddP7=a?|oW%RZJc& zZ=TGKt_T|2qjPM+BBErP4Gi>k)jDksiOm9uSgdxtp>>IDzfWn4e|-(eHg{3$`xiyl zMWv@Y-8)IBSXdR6ACht2o|Y5e5V68c;wplLCO}l6wjto`8no^s*ieEhw+5~I?`XND zL1JRyD|&^2BdFM?#W(hD7R=GG(N^A`E@8E$u=;KpUI*N26~ZVlf+23fHWNtdGj>i&AOQIYB#bjk@=t^^M_@EI+){C zkaasW#}Epobk7jUM~8 zYr8C3W+KyP+cdH&nqcpJni%WK44NHZA#?)W2bO6V-XdPiHdgDOtOa!9lCIszx~a_hZo&ppRc zU5~+QzoArS{)HTa=bR#odOa>d?m0dQbI-BJA--SR+;gM>Fubc>3bWfj9^Eu%M2(q6 z25+nr7D9++{?)w@yK1sx@1f2^?4Ud8{cuYJ*v*X7M~YQerh~(3`XLFdqv|tVFBGu~ zP{!7GtSM0Ktj(8G$b*aBtP3tuIt=$eP_E@=L+mutaVcHb*XduDIx{CziKG~m3R_p|$JGmXhi)pFe(}ea+p^Bw8 zAu5oZ*nA{8*+Q;qNBn1W^uRVOTlvgoW?4ayzuac5xk9#DuH?%lJVuDDK2-&IYRW!I zV*mLeA!1TxkfzWNs^#WWy@N`;0?(ap858%(Bz(I~y*m`1nlp)si$`JJf4k)(HNJ%$ z04XFM48Y7I3o`Xwq5SgiPvz{(*38UY#h&fI-KrES!AsoGY65)q)hR%!-hU2o>T$?# zh}OGXKrD{x_UH`g)rSG|(-9S;>rjV?@*+z781E;*?eJ0<`a8WqB*xZM^>qS~V|ctq zZ5l}WuX>N92U>tUj-*CRxIy4XNfdRyv6iTFQ;TuLQRg4c=L=aV89~5!lYm-@5A^Ye zL49PDSp6x@DjdSe2$?!`8UG-R;r>zj(n`5ZH3yt3k`3d+P_v?oFLizp=sXumuEVnI zbaA-<40Crtv9VRPezr&8sGw>DBlF-5(5C_yg?2;iiVw#06@0r5`XpQuorTB#&o)hT zpag~P2=*PkQbdS$?DTG0Rspu{(J3_2D6SS9XM72K#A;KXLy= zyFH1CaN&^S>tt|o85ysg-Vd#zrMP7yk>epOmn|JAZ!4Y)$&!x1N4x~E|MXo zQ)R3N&zwJKVnVBz3}}yZ7b`+l@=&tkh$=bQM3wv&RY|OMA!TKtOF}bLwikF4(hfm% z!f1dY7d%!oS#qt7on$R*a zvqBN1g`Ogf{zQqYHE=H%SU#!bgHf~7%|1@v|`R+^NNVnBt zy%ug_V-w^nAc-%M5Lb~*)S82sEu!@G^xsw!$%Gu<;6;+ zuqV=*e8c$h@zM10@w1~3OpK+c#vVRDJ~ejV%TYXAsZ-r4g7!g~nW8;g9-;h8VHeoZ zBBsZU?fvR;TMEQVDSQ+VUNaj;FGVC^9aluZO&%LaH6=pw!l7Deq=C&2?-&$JCcqEY zX~)knt8SH6JoqII9bYafrt7P$pbBAt`LbJs+FFEA<>6_Uk!mh9nK=2;Dv;UE%Hqs2 zno1YNxyO^l)w2=R&kGxjLc%qxIR6My|p7vaNxBU({b(f=9UmJ)ulok0_l<( z`*@osR;9+W@4g!Qj4}&Ed_E`Uas^-(k0a9v**H@YX9yZ8m%P`k0xL`=yfcPgW*6Vl zrD^gu@-#Xe$rx`Ah@uXzia@`gZnA+>(061z?+X(j^mG2bWDV0c=dytLis0 zPx(ZcP=^x(i|NC`IGy(sL7l(gBFG2uCo;nMbBv?9))_+*TPz-A(~dafT6~8#!eP~m zI7f}w?(Ku95$hl&)RoFmC?!C10B^++=|`7kbF3Kf2^ zC@(L{i;MEo(_t=PsZ1L> zvGyA&G8F&+Kv)Q6|9k$`y|v$OCM#ae+HctZCDwjx%J$~cE;PV7TT-FWNW(dd*f5u|`b;y|fX zuEJ~61!G#jn}mLMOzHo|ilgho<~%nsrT^(UNhux71$6Uite*Q2pMXcZjw!KDm2)=2 zzzJQoC4&BL@mgalD+m-vsZ+~|B;X6OzKw=c zbos>4<#|6E15I}K9!+%ipg5W|Q*lGlkkGTC;u@uGR9q2%rXX}etnFCEWfj*|-1aK2 zfcb^2;ySmJU|xbt5RJ$uVKgF(9OC-5jYcF5K!QKjC5?2;1oC#4Ts!a}i18jgHCW07 zAt2;;f`4^S4;@cd>>cYg1`%nkIiMv}P?g`$Ezi`D`dXIY^>;Z7C%PkMgwp8L_&H0% zCl=(f^3M%Oz9~82?x=omWW`a{&jf7~)o_ zUWz;q#abBwl5HTRWy;$zM(rO%Kawt>6?wbD3D#*3mzv-UAID*MXYA@+!DW~&*Fp2} z&B9X;qY9=~?htG+1eRbm0Ff)OuW%3_7EMbF+8DFS)7%CsDmRv5D+@|TM>PTQh$`;7 zNHl?$BtWrqi1oCen%eDc`D`Qzi04_Xi4yUEi~amJzOpwe@vEjFmD8Xv=VC#+LrQ^y`c z&7%*DPmG^?^pJI8{M;n#Jb_9_tuv!j=f;nnpBSC8&YYh*bNcKUnm>+eC&wpGOre#r zQ)83ohS4gXSz~X&jdk{+(TNFZYVX3DO^wj8s?A?@g8XcaJ*L3e5d1y=?p}kT3f9%}&=}8Xb*y+i0Q@A~Z zQBIu;K7C~T?ARe|bZY!80divMG^!^!@daTv$?qn|R4oLx^g5Ukyh4DT$Do7093LB< zKwX%gN#kV`>o5F^cC!mMEAtB;dwqtVM>A}&l8^SDmFQH#aaNLWzyv3U#g`W&1#R3e zV(poBpA!n5j@>S-;=0?Vz1u~={DOA7e3E4HYq$h%7d{EyE-Z40;n&vfLK=Yee5Ffn zmpxOA{+FmMKbqe+td(`!>;;3p{9D3Bi1JVQSNEJTUrSc(O?K*xp+7&(VW$0IF5#E? 
z^|L&>(tp_x!ZZ$o57r3gGMCiyk>1b~oY0-ceu*l5IO^nh9N|6OKQ=Q1t4jy4*@#e9 z8!({g!YGvBVYfbuG=4NGxjuo=z z{xd6%dZmp}kBzh`Ht3ziJHdd%Yi^UPem{FcwXF;lt1cQ-W5q zB*tKLaVcOGwgM*wyauwN8C#yqFv^tE1D4mB1-L>=N3jz(g+01BVcs{ibV9={7HbtE zyXE3#I5UX2NL`-iA{`cK=hPZ1OuSjacy5eyxm`)4{N+rwAd9(!>iM9wbOIEh$SIbRVzNH!H^!v)W{p zg*h6TGN{++_V1IsVfXQBZRnnZsukS>bURyPqFMAdM5DnokHQF{rhj}48v7{hlhYG# zfO$dH9tIZ-kW+&ObG|-d&w~k5#I777Tem}fXn7QEvm-T~yElMM9pWX0%VPeFw{ame zhaI#dxV`E}bKc4qD0-cct?A=$6I36M>z;iiZ(K2KVN*ac!O%4#_kZabe!EA5F1awhZ34LE7*!7m+1lVKD z)_ZYlo>3{>b=N%xr7!(<6wU7J2AXYF+NH zQFC!go`Ky3e2-iE5j=(9jDqs@`B)g!cY4u)fj+P9J^JWv!Esyl&_>F zA{ykKm~tPBt5$Yz{*`J9MT0kW`3OVl*uW-nTsN@W8`#1?UeE^i>q$Hhk~KM>SCO=Q z5*pYna)|fW*1#qWK>7cHE*aQ2Pf=QG1KZA0jfQmPVSXlqZ}XnPf~tg)kZ6&Ab_hb2*>y|cuCeEV080sd512Kc`0 zTW^qmkNKunsPl)&ua{C2BaP|&7rwl52g~!vtrGCqzyTuumwQKizT6d0OVCPd^GUm0 z;tjWVz6A9Kyo^(1|uwziQ;mmSeLL8^EFL>xzAck$! zFy<v*QjIo{lV3A5oi|l2c0Cc^~0X;PB)E7{o5q$yKT_wGr`PRZZfI& zMHV8}`bfC$AuA=qn6&aq7L(O`5t%YLCPPkqPS#yOS47g zOQrHWa5I-#z-iS23t86HfEone$WuCN0>+PIvuVCMbTE{mgE0*5bz@P9yALp4GxhJz z7SN8%+woDROo9y6t0z85T7y3L1_@NP>sdmliIvTGjLBvnZdEVH-me)X->U8=%)#8B zb9?U*Hq`=zafE#kzyZ_rk&QOEg%#2oc2NjY)(Ug>FcDGy03r_^9L83T8Qu;J-tlPa zj=9twxxrXc#je#FMZtTzfr86d`bJtnFph+89jDF)G~@ne(cTdJ1UEPozfcL2=XA)n zC2-|DCysKr47X3KI1arfCW+od2je{9?Q7qg2vH0jOsZWE^d8yunqHhcCsq=7N`AGJ z4@Wn#jHq4+RgLzUhfFn31tl12QKADkN0&0ig`e`5NaAtA@@n(dl(q|-vQ}cQnnZN9 zju$pLuIme%?H4v>?qA3Yn?ufHB<45ra-NqdT*CZAEL@a=#W3fPyeyQ$mmSh*-TC4* z%r8U&fpYuSF0Fdr60UkyZLGv$bE?0xW-1l!UNqF%cMwiO!EfbX-COtkp=8D0BVAbc z^vAu-h0o7G;qA`Cr{A!@^cmdKPZC}G^b6n`VSF)mk|J*ogSWH`Jh9#yw5jemURE4kKsC{^iM`o=39QrAvXAtBfO4=okY4Ndm7nXZKar}m`&$Sx)gCnjOdiQ*s50%I)Eqw|4Y&1Q@#S}hv5$1>r=vO z5xsE8Cw=|;)nyA*EX0R_QEfsPIB!y4D2xx z8T6d{7%6Qi`Jf_ew5&i=#@-lnUkW@tdf!SdUoF>X^HNQO>>!zj`%h4uFs-xS%W&+` zk@+-}g3v+&bse%SBS9Ab3KR87^_42}zQJa+DxBmHOKO_U8ufrXW$!8J^P%U z14Gh&ruTwqkMd!}2{K`q-V)lSKKi#bOyyl=nELMq3U6D>RJ2)BuXRG+(XnO9Dz00m z?JZLQ^9$KBb^axZ>0eOT`60J_5{BHe$RQnE+mKt*05si&E*YQOr}Qal%nE5}N}gF{ zFrM!b7D9;MT4bM(LZNsvUFMgAnob!*T28{H%+ zN?ulC7!xakQx1r_h$I)|BI31GI#aG!naK$I_aI=^IbP7M{i7GuiKsrbbyBV)op!-o z8R*~9dTJ8qDQs5V_j{stF>jPp2aWmO)27muFn21TQz?M5G$ zSu0B6wUrPM+jE1Gh$g0D49j1k-*vl15EC6zas;Fa|75W-#8h|ZB3z@tD_OA<4tq-z z^ZK8SYZciQnxpGNT7HAbAo| zaF$feEAz+C(ZOM{#2+4(N>(;N1Cd^nobT@VX+osY=A4OH$z*!o4mnIHq=)!YMv^4j z>WL<8s%4e1Q^e%u6YK5>v8p4eOloaXZb125ccC1T>%+;4BjmcX3Az39S(QW}{!1oRKpIXV)`!lWI|EBrHo8LaAxShhZ67&uF>m-#tvkeFPXBH?QS*ZId~S(uM$f@*UZOYgDub=nf)ahOUvJl{HRAi;EUuDVTXdMmAo z6-`KOXFzHio8z?B1ff?!Y{bLm6Wfjmv1N-mH9UIObiB}j@RQwza7boXk`+hD>_8JT z`*~61G~P?Hdmz6eD#3b4eN8<29mT^Tm4uiS5ef|kw4)Pd`gJ0gK8{-iHRsCEFc}<5 z0jxZ|ik#`ffgpsmov9-hkO@$jB?W|6saJ8ZP=dUkFP%0feK9#V-6ku&#n{AxCSc+oTlQay9$CL#x0+qqJonc5Q_#)78~&WNO$2I64i&36-S8bMNNq6gh^D4jS-y3 zFNsRV78fHDro|G*)wshogiV_qrCXI6K>K)dY~4o67b_FQgklq<4Am+%_i?mLPX8(p z6SEe#u)OVz=>%obz8W`4BJ9^72_;K7&gbvBFOm<+DZkWKmvUlvg7$EK<3cB~rNWZZTZ`_izppL)H?fsGP_jedua5Q_gR{?$Df+!9(< zO$LnD@9Zj!!?5>ttT-B}VFDef6$@nW?umrGyO{;}aPr9LULI6idI=WB#Ntp)qyihn zzM|ZMncl0T3KTGuFx`bN_js8*go-`}AOV%wGPn0D=i>N4#RqwSkO`465pPi^MoYjvJF&t{8<8b_*_t0R8{_thXxe=!4-q3q9sED8IQAXbVo6MT~ngqV8u z8`rw`aFq!bG18t8X)*%5BG|-Nf>9VmV)dR#wkZ@`5*>y|6?5SJCmhv=#DF|Oot#zA zFOG!>GyTQ(%dGTc0|>*t2f~gPK!|gSvn%Hm2n|vnnLI=?ryzvrbum`blNO@9=H9jB zHM?5CHO_1PPE%_2nFvn|Ao*xSql+INdb=7U`A#b_kZq#($abJPP!a~rHYKZ5qPfJ) zCtax2nzojcs4%87Y_I z7f1v3>AKf9XV)^=50+^-zeM}umvQkn@(@y-JO7TxRKwd`E7Y(6(74(da%z0#OSnW1 zoXQ_J{K5mrWIeF1lI-f$a7Euw6DljpA*#uVt}1A$^GmnhH+CgMb>1}^e%~q^eq!l{ z_xk+W9cGeK*_3GX;UlZ;!{w#>aPSek2&XjsZ)Kc7a$-Uv=ME1a9zJq72&Rzr+?NEE?F{wPss?Yq(}icB?r<0=~$?^OH7o4A)$+qbW-?Yh^`UE^^%MRB8K2Km1u 
zOq^WU5mrjB=LqW}19F6mkQ=Cj>PedFfKU>PIog_*)l&d8P%_qYs+XSecM-(2Pj&Ni>* z8z|(vw>e+P!=>{xyuJ8zjPUEkz<9?-X_+iTsjO(UL@loK_I*l(B3&0m))oN`Krdka?S5 zIwP$mWWqmRJ>mZ(D@SI`qe7A=nrenX>QlSB!}~bgrFh?6Xls>P-JMcJ zOWx;nRXva~h3dgWo0ICnqkI-o4>pAAf%8U|bx}ol6YHbW;42`~LPBY{3$H1qLC0|) z>lExEJt?rSik@y)GuLg#xo5ltCnVx2Wu=g3XqjAe4$XE$wkPo0+xyajYC46cQ#I9e zLTWjk0zYJ_vz=BEGBXAI4(3lYGqHbLF|)4g8|}P+&vt1(IijWb)%!aId^mUS?X-$; z?w;wi3O#q?u~_lkbu5Qk`%pjErK#h9mYTYM+$qq)sr$54(Nfg37U7+$*+6@2euDPP zSdN@+D1+bZ73{p7U}L)3lr)=QVgG*hu>5+bRfJ^lRW%*mmLCTuCQb>sts&-CVW8K8 zXU-oqfWLW9cVtKcalQR7Ef(hl>?vtIl}XK3>Pf|X1A4GZ&FO|ttB6lQhf+j&9~4n_ zP(|2_Pj!k*njgwi%!>J8r$pU)Rp+%`!YYnuDR%gpPA5NPhetZCBAmHbby|ga>awn+7_qLbF(j$5`<^cFba1$99Z%Y3ewj zrKaw+odPZ7s;`kMT5{FyTjATJ=mwLnoY3P$5s!`H+EV@NC=Op z3F)?t-Xym{6z*^D8Qgb;aMx)U5`cfIQ>cY#e6`amdJg#b2KXq{f4*l>9|)n&UHJ)+ zf3#D0g&_Z#POIoS$fGPqZU2q}{R=$WwAQ_ZbqJmtE5-BHyzoTcRb|LGKDA^Lo;(<;J=``b>dP!m`8 zb|PFejhL>qAGzgkcR0CD!pZQ%FrbqgBzpAZdI=rHL;8O4kj^^X(fxxTdJ*tLBNT5^ z;l&-5r<0o`MD*mky0?Q3x^`B@kug$KJwh(E$fI3&t5&t`w2nVh5v7dTdkisgdl*r+ zkCy|y+=PoQ{bS)c*Nac`a*CJJygbaySzgZb@(3<=-PxQeMUME-mxd)9lI3Xv2WoWI~m@wx8WVT9o*Hu0Y>Yg z_W~%l@$QDV+Yin&p7!U;b4Vj8n_V!w-nuNu>20f)%eAz-FgIN;Hk=uMwa&`CrXZ&eBO zT)QUY*rIQP^1f`Lrn-P(@)PGP(PKL5-iBx>gftbxua%ds}0ijoxxD&Tmr;@GFqL*y>n7NosjES2}0)-{zifZToKE8Mt)%M z{u#>jsPb>|JG%>5#m;fh5nfg}FGPqtA{TzR+KV3&A$~}N_#qMEheU`U5+Qy_g!my5 z;)je4C#TlE^^T&bDRc!CxCL24C4|a{@kZ?K&I^q68w2@_n?@cs0j&C#9bA6@DK4OLfW+fm)o58axO zthWuR%4f?G#mcVfm%Poz%(PvU&U$?~P!Tj>Uuif$ihA?My#cp^(;BY~i;Kqw@ubgH zGbLATmN`=`Us*uN`H=Htc-z~VE@qxsNZ0fWHcA{1gYveha~g{cZxaGHPF7Il*voZay24ZWgFIi_Yvv$+|el${}(Mjj;NwwZyeHNzCw|6zLAA#f>LR~C+=9YuKt}3K?ZLV&o|-h0be&4=FnTW;ce5ihO~YF`mPg*Y9@<0 zUn)F)Ty5S%q`C76%ndmu3FH}Xpf+DlAx6694p&OEm17N4&WRlR+X za-0l^Fd#-c*rnm!Xcn+_4uK=}_M2sMnMwtWmnFBFB~o2FQht+Jj$L1oCSxC~9)%L$ WJ79hcj$2!(s2(MY{La>=hyNcadH3Z2 diff --git a/documentation/build/doctrees/index.doctree b/documentation/build/doctrees/index.doctree index f358705ec744e29e5a3e829bd4dd0bd0fa442d0e..fda9448d2491ced54751cd29bff7ccb693d95079 100644 GIT binary patch delta 664 zcmbQBu|b2Sfpx0tMwb07Vw@$Z#U;s!#i_+p+NSvRuqP&$Waj5h-pgXbl97~=GWjD* zJ!979LRLdY#@5LT*`+2oa0m$MXNYAqWvFE^XQ*drWN3OzPQJ&M#W-iOC%XcW%wm_4 zos+>HJ0(M~ha){TFEcMCwPH%^l*TEwQ#61&R{_m&W?cbfEuCDzAv0NnTbwCl>*V#E zT9Z9Eq+XyGU06Ixq_E--`Zhi{H52YEZ88GL`0G)6H z#d#o+E4ZB(!6h3AcU~3;BLf4f3lTzaM}5RQEIXyx1QWFxK?E6%K{a1K@=+pzycT~$OaV@#th@s$$~7BlR0?=c_$;qNYi9> zo&*-j(j>6-<|RCT85y%TFXDT`%vGOJljRCDFUx821R)X0`c5#fvou2zloDz(bh4a) NvKdXA_X-6w0stV9)#d;I delta 632 zcmdm>F+qc+fpseDMwb07lkcE;DH=eX z%Yf!Mvn~O$7EYePAv4*7TbwCl>j)=3 z0Xj)r@=y;)Zhi{H52YEZ88GKbO?Kc?fjdnK=%5R@omRmm8wht=77HT-1FGu~LU0En zgamuoic=Gdk~0utn8gD$0?Fwdz|hR%1c^ZcRDw}#aw4~$+#$GDWT#656|+GUD+s^> z7bM686%@t{-_*&1ERvHoc?5a;kz%86vO7-#i+E`gSbFmjp1+KY>6;JnJz?gm&Zx+8 m1e%v+H+h4Qh-7sqnAcgFAqh$a6&X5Nc0k#Ty3Kcmf*ApkGRw>W diff --git a/documentation/build/doctrees/installation.doctree b/documentation/build/doctrees/installation.doctree index 4eabba053735073a50caab40b63c0db8ab034c73..2e9f2a122710e9dcc5910f18d42df4dbdf1931ee 100644 GIT binary patch delta 1373 zcmcgsT}u>E7VxkoVaaf!PY@=PI7W^wTr@~MThi2BGx4!1Ca3bBa=DszFP26z~kNKW#c#E)fM zGZno7bbP?hI<_q8ZXmzvOYBS`6DTxTLo`%3sg+2K&L?iVhzkoyBu9?|T6)N;NCa-9 zYc$gos`3DpOcw<{G=gSj&KR#~8tVGeWXzP5yQst+2l)KCkepl#$w>VOoweff6 z6Nw6;Fm#17)->C~Wiw!h`-X79LZjkn6NeE2^F0nZs?8Q>i}{BA$3`2of)@!egN)gR z(b1u{PNdYQ0hCR*S(0yBa^otKShx_JP0Xs0Q*=!^{iAa_h&q5&BGU)Qe$QJoSx3Mp z{{?UE%TB6S{nMklw&7a)T2Xub%cqL@*jwiCcimb8RX|3vG>BD!1Pxx7D{~ z-YBT?!Ry8haOO_G2W$bb5RsAks!AtU`@+^-1vM}{ox1ebb{h-SHJN&u%nYtb0$&P& zJ?>MH$oiy-+_Pa5)e( 
zv{&@}WYS)SUy!rZkkbhRw6rW~Eym35!tm!#8YS=#6vn%>ZYIMEV oH61Lg9`^M=%GPx|WPc|r*$K$FzKn2vWRKj5K;&STr0yEO0FWNu2><{9 delta 1338 zcmc(e&ubGw6vw;vhc-lO(xeCqnfj|qYoi`KNJ^z3{=ll>59+~Ur@L=66F0MQcD5yg zLlMirTxxiU+*OX_||mMPY^@3|t>jDZc1&qbi9u%Q^U{qr#!iMnQP z{Ewl#W;UYtc4$3yXxcidzofDae$8KoC6H&#Dnn24$uIzk+gD+YEOXBziy**7_gecw zr@2iP`i*H=S4>>O#?6#$G0CNfmhAKY$^~ykm+=s-qDfoZOd2ji}+cbXGbfR z-bzpEPs0bSyk0w;wdVDBYHrBGNdnbT*1Su#?^>l;Iiy!pm#hinmi0^jNWDxZW;}e? z0$=VD=*yXWdZB56gqk04<;yym(g!oAd#4v3>2mt4mDBIiSFJHra6XZ3oR@Aqe5%E#&oNH&KNv8-=7p^UxQV|}0(F4|{6d}hhP5qdB(jR_@%qhLb6R9CSm!oAWWVmS-fuiT^1YWS z1XP}wIkwnl#WVl7IUbnfJ|1B-X#7z18U*HfR7}MR+gfQRuQU^1hiue@nw%utWOten R_N_^%+{TKHkH_v>e*k{2(%k?6 diff --git a/documentation/build/html/_sources/index.rst.txt b/documentation/build/html/_sources/index.rst.txt index 431643d7..9f32cc73 100644 --- a/documentation/build/html/_sources/index.rst.txt +++ b/documentation/build/html/_sources/index.rst.txt @@ -17,6 +17,8 @@ Welcome to WAFL's 0.0.80 documentation! running_WAFL facts_and_rules examples + testcases + actions license Indices and tables diff --git a/documentation/build/html/_sources/installation.rst.txt b/documentation/build/html/_sources/installation.rst.txt index 2eb6156d..362a1398 100644 --- a/documentation/build/html/_sources/installation.rst.txt +++ b/documentation/build/html/_sources/installation.rst.txt @@ -32,20 +32,24 @@ Please see the examples in the following chapters. LLM side (needs a GPU) ---------------------- +The second part (LLM side) is a model server for the speech-to-text model, the LLM, the embedding system, and the text-to-speech model. +In order to quickly run the LLM side, you can use the following installation commands: -The second part is a machine that runs on a machine accessible from the interface side. -The initial configuration is for a local deployment of language models. -No action is needed to run WAFL if you want to run it as a local instance. +.. code-block:: bash + + $ pip install wafl-llm + $ wafl-llm start + + which will use the default models and start the server on port 8080. -However, a multi-user setup will benefit for a dedicated server. -In this case, a docker image can be used +Alternatively, a Docker image can be used to run it as in the following: .. code-block:: bash - $ docker run -p8080:8080 --env NVIDIA_DISABLE_REQUIRE=1 --gpus all fractalego/wafl-llm:latest + $ docker run -p8080:8080 --env NVIDIA_DISABLE_REQUIRE=1 --gpus all fractalego/wafl-llm:0.80 The interface side has a `config.json` file that needs to be filled with the IP address of the LLM side. The default is localhost. -Alternatively, you can run the LLM side by cloning `this repository `_. +Finally, you can run the LLM side by cloning [this repository](https://github.com/fractalego/wafl-llm). diff --git a/documentation/build/html/examples.html b/documentation/build/html/examples.html index c5e07594..b8761aea 100644 --- a/documentation/build/html/examples.html +++ b/documentation/build/html/examples.html @@ -55,6 +55,8 @@ +
  • Creating a testcase
  • +
  • Running Actions
  • License
  • diff --git a/documentation/build/html/genindex.html b/documentation/build/html/genindex.html index c7713889..cfd700d2 100644 --- a/documentation/build/html/genindex.html +++ b/documentation/build/html/genindex.html @@ -46,6 +46,8 @@
  • Running WAFL
  • The rules.yaml file
  • Examples
  • +
  • Creating a testcase
  • +
  • Running Actions
  • License
  • diff --git a/documentation/build/html/index.html b/documentation/build/html/index.html index 56d3b8f9..0d63a690 100644 --- a/documentation/build/html/index.html +++ b/documentation/build/html/index.html @@ -48,6 +48,8 @@
  • Running WAFL
  • The rules.yaml file
  • Examples
  • +
  • Creating a testcase
  • +
  • Running Actions
  • License
  • @@ -110,6 +112,12 @@

    Welcome to WAFL’s 0.0.80 documentation!
    Rule with remember command
  • Creating a testcase +
  • +
  • Running Actions
  • License
  • diff --git a/documentation/build/html/installation.html b/documentation/build/html/installation.html index f6aab704..c836decf 100644 --- a/documentation/build/html/installation.html +++ b/documentation/build/html/installation.html @@ -53,6 +53,8 @@
  • Running WAFL
  • The rules.yaml file
  • Examples
  • +
  • Creating a testcase
  • +
  • Running Actions
  • License
  • @@ -103,17 +105,21 @@

    Interface side

    LLM side (needs a GPU)

    -

    The second part is a machine that runs on a machine accessible from the interface side. -The initial configuration is for a local deployment of language models. -No action is needed to run WAFL if you want to run it as a local instance.

    -

    However, a multi-user setup will benefit for a dedicated server. -In this case, a docker image can be used

    -
    $ docker run -p8080:8080 --env NVIDIA_DISABLE_REQUIRE=1 --gpus all fractalego/wafl-llm:latest
    +

    The second part (LLM side) is a model server for the speech-to-text model, the LLM, the embedding system, and the text-to-speech model. +In order to quickly run the LLM side, you can use the following installation commands:

    +
    $ pip install wafl-llm
    +$ wafl-llm start
    +
    +which will use the default models and start the server on port 8080.
    +
    +
    +

    Alternatively, a Docker image can be used to run it as in the following:

    +
    $ docker run -p8080:8080 --env NVIDIA_DISABLE_REQUIRE=1 --gpus all fractalego/wafl-llm:0.80
     

    The interface side has a config.json file that needs to be filled with the IP address of the LLM side. -The default is localhost. -Alternatively, you can run the LLM side by cloning this repository.
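    As a minimal sketch of what "filled with the IP address" means in practice, the snippet below updates the relevant fields on the interface side. The llm_model/model_host/model_port names come from the configuration chapter indexed later in this patch, but their exact nesting is an assumption; check the config.json created at initialization for the authoritative layout.

    import json

    # Hypothetical sketch: point the interface side at a remote LLM server.
    # The nesting of "llm_model"/"model_host"/"model_port" is assumed and may
    # differ between WAFL versions.
    with open("config.json") as f:
        config = json.load(f)

    config["llm_model"]["model_host"] = "192.168.1.10"  # hypothetical LLM-side IP
    config["llm_model"]["model_port"] = 8080

    with open("config.json", "w") as f:
        json.dump(config, f, indent=2)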

    +The default is localhost.

    +

    Finally, you can run the LLM side by cloning [this repository](https://github.com/fractalego/wafl-llm).
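    On the interface side, the scripted-actions entry point added in wafl/runners/run_from_actions.py earlier in this patch can also be driven directly from Python. A minimal usage sketch, reusing the template action name from this patch and assuming actions.yaml and config.json are present in the working directory:

    from wafl.runners.run_from_actions import run_action

    # Runs the named action from actions.yaml: each "action" step is fed to the
    # bot, each non-empty "expected" entry is checked with the entailer, and the
    # whole sequence is retried up to ten times (see run_from_actions.py above).
    run_action("action_1_summarise_guardian")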

    diff --git a/documentation/build/html/introduction.html b/documentation/build/html/introduction.html index 741fbb29..dec9ff0b 100644 --- a/documentation/build/html/introduction.html +++ b/documentation/build/html/introduction.html @@ -49,6 +49,8 @@
  • Running WAFL
  • The rules.yaml file
  • Examples
  • +
  • Creating a testcase
  • +
  • Running Actions
  • License
  • diff --git a/documentation/build/html/license.html b/documentation/build/html/license.html index c769ebd6..8c2b7507 100644 --- a/documentation/build/html/license.html +++ b/documentation/build/html/license.html @@ -17,7 +17,7 @@ - + @@ -48,6 +48,8 @@
  • Running WAFL
  • The rules.yaml file
  • Examples
  • +
  • Creating a testcase
  • +
  • Running Actions
  • License
  • @@ -88,7 +90,7 @@

    License - +


    diff --git a/documentation/build/html/objects.inv b/documentation/build/html/objects.inv index ead68abdd1b9960c432e4a381e0507cd07c4a9a0..1f53eace8dbf3e29944f7ff97e06d0a2cb45700c 100644 GIT binary patch delta 387 zcmV-}0et?<1MUNmbbm`~!!Qut>lEq%64G7iE|eAuDTKC=rBURGEzl=OGL5_HHF~|C zr21H~WE-;cywA*dG()ImN*Ziz{a7oCSNE7iXx&=D@p~sFlMUWR5K$#htm!oIVeTr3 z=zYo|1Q#OC9Ews2i~8pwLMPXPsfesA-Ax=5DjGjqz%lHJ3x7|H18TtaUBe}qIXMFN z_Jl^?tN%o!Y^aeb4#24rfUUxZ?aS`(=hxuPdb57mU?=VbNE`m{&Shbi5zfA6lPQ^0 zImevYSi<>&b;Of#M4ZiPRd;l58}Vct5yTkpq1Ps)}^|!m*&D@uLMC%YnMG<^oUyimM7P!7S(rgtsR&`hQ;hCz@nqk4$j{!L$Ty zH9kH(?|;943@(eZxGS*}_Y#y1x4UsE+;S@T*J?6zCUwp+XEv4!v0SFb*?guh
  • The rules.yaml file
  • Examples
  • +
  • Creating a testcase
  • +
  • Running Actions
  • License
  • diff --git a/documentation/build/html/search.html b/documentation/build/html/search.html index 785dd13b..e4efc086 100644 --- a/documentation/build/html/search.html +++ b/documentation/build/html/search.html @@ -49,6 +49,8 @@
  • Running WAFL
  • The rules.yaml file
  • Examples
  • +
  • Creating a testcase
  • +
  • Running Actions
  • License
  • diff --git a/documentation/build/html/searchindex.js b/documentation/build/html/searchindex.js index e8b86b8d..844a7f7c 100644 --- a/documentation/build/html/searchindex.js +++ b/documentation/build/html/searchindex.js @@ -1 +1 @@ -Search.setIndex({"docnames": ["configuration", "examples", "facts_and_rules", "index", "initialization", "installation", "introduction", "license", "rule_with_examples", "rules_with_execute_command", "rules_with_remember_command", "running_WAFL", "simple_rule"], "filenames": ["configuration.rst", "examples.rst", "facts_and_rules.rst", "index.rst", "initialization.rst", "installation.rst", "introduction.rst", "license.rst", "rule_with_examples.rst", "rules_with_execute_command.rst", "rules_with_remember_command.rst", "running_WAFL.rst", "simple_rule.rst"], "titles": ["Configuration", "Examples", "The rules.yaml file", "Welcome to WAFL\u2019s 0.0.80 documentation!", "Initialization", "Installation", "Introduction", "License", "Rule with examples", "Rule with execute command", "Rule with remember command", "Running WAFL", "Simple rule"], "terms": {"The": [0, 3, 4, 5, 6, 7, 9, 10, 11, 12], "file": [0, 3, 4, 5, 7, 9, 11], "config": [0, 4, 5, 11], "json": [0, 4, 5, 11], "contain": [0, 4, 11], "some": [0, 4, 8, 9], "paramet": [0, 4], "chatbot": [0, 4, 11], "url": [0, 4, 10], "connect": [0, 4, 7], "backend": [0, 4], "A": [0, 5, 6, 7, 9, 12], "typic": 0, "look": 0, "like": [0, 2], "thi": [0, 2, 4, 5, 6, 7, 9, 11, 12], "waking_up_word": 0, "comput": [0, 2, 8, 9, 11], "waking_up_sound": 0, "true": 0, "deactivate_sound": 0, "rule": [0, 1, 3, 4, 5, 6, 11], "yaml": [0, 3, 4], "function": [0, 1, 3, 4, 6], "py": [0, 4, 9], "llm_model": 0, "model_host": 0, "localhost": [0, 5], "model_port": 0, "8080": [0, 5], "listener_model": 0, "listener_hotword_logp": 0, "8": 0, "listener_volume_threshold": 0, "0": 0, "6": 0, "listener_silence_timeout": 0, "7": 0, "speaker_model": 0, "text_embedding_model": 0, "These": [0, 2, 9], "set": [0, 2, 4, 5, 6], "regul": 0, "follow": [0, 2, 5, 6, 7, 8, 9, 10], "i": [0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12], "name": [0, 11], "bot": [0, 2, 8, 9, 12], "us": [0, 4, 5, 6, 7, 8, 9, 10, 11], "wake": 0, "up": 0, "system": [0, 5, 11], "run": [0, 3, 4, 5], "audio": [0, 3], "mode": [0, 11], "ar": [0, 2, 4, 9, 10, 11, 12], "plai": [0, 6], "signal": 0, "back": 0, "idl": 0, "fact": [0, 3, 4], "guid": [0, 2, 4], "default": [0, 5, 11], "can": [0, 2, 4, 5, 8, 9, 10, 11, 12], "llm": [0, 3, 4], "model": [0, 2, 5, 6, 8, 9, 10], "listen": [0, 11], "detect": 0, "word": [0, 11], "similar": [0, 12], "threshold": 0, "volum": 0, "ani": [0, 7], "convers": [0, 2, 4], "utter": 0, "below": 0, "ignor": 0, "silenc": 0, "timeout": 0, "If": 0, "time": [0, 2], "longer": 0, "than": 0, "consid": [0, 8, 9, 10], "finish": 0, "speaker": [0, 5], "text": [0, 6, 9], "embed": 0, "simpl": [1, 2, 3], "execut": [1, 2, 3, 10, 11, 12], "command": [1, 3, 4, 5, 11], "local": [1, 3, 4, 5, 11], "rememb": [1, 3], "languag": [2, 5, 6, 8, 9, 10], "through": [2, 10], "list": [2, 8, 9], "retriev": 2, "dure": 2, "written": 2, "format": [2, 8], "do": [2, 7], "well": 2, "call": [2, 6], "user": [2, 5, 6, 8, 9, 10, 11, 12], "want": [2, 5, 8, 9, 10, 11], "know": [2, 9], "output": [2, 8, 9, 10], "get_tim": 2, "For": [2, 8, 9], "exampl": [2, 3, 5, 9, 11, 12], "ask": 2, "how": [2, 11, 12], "you": [2, 5, 10, 11, 12], "add": 2, "its": 2, "prompt": [2, 8, 9, 10], "eventu": 2, "gener": [2, 8, 9, 10], "an": [2, 4, 7, 10], "answer": 2, "am": 2, "fine": 2, "compos": 2, "condit": [2, 7], "action": [2, 5, 7, 12], "trigger": 2, 
"match": [2, 12], "against": 2, "input": [2, 12], "In": [2, 5, 9, 12], "abov": [2, 7, 8, 9], "whole": 2, "ad": [2, 8, 9, 10], "item": 2, "order": [2, 12], "should": [2, 8, 9], "think": [2, 8, 9], "introduct": 3, "instal": 3, "interfac": [3, 4], "side": [3, 4], "need": [3, 4, 10], "gpu": 3, "initi": [3, 5], "configur": [3, 5], "server": [3, 5], "cli": 3, "test": [3, 4], "licens": 3, "index": 3, "modul": 3, "search": 3, "page": 3, "initialis": 4, "wafl": [4, 5, 6], "": [4, 9, 10], "work": [4, 6, 11, 12], "environ": 4, "init": [4, 5], "It": [4, 6, 11], "creat": [4, 5, 6], "l": 4, "db": 4, "main": 4, "requir": [4, 5, 6, 8, 9], "txt": [4, 11], "secret": 4, "start_llm": 4, "sh": 4, "testcas": [4, 11], "auxiliari": 4, "inform": 4, "about": 4, "state": 4, "edit": [4, 5], "manual": 4, "script": 4, "start": 4, "webserv": [4, 11], "python": [4, 8, 9, 10], "packag": 4, "mai": 4, "credenti": 4, "simpli": 4, "docker": [4, 5], "imag": [4, 5], "case": [4, 5, 6, 9, 12], "version": [5, 6], "built": 5, "two": [5, 9, 10], "part": 5, "both": 5, "same": [5, 8], "machin": [5, 9], "first": 5, "your": [5, 6], "have": [5, 12], "access": 5, "microphon": 5, "To": 5, "sudo": 5, "apt": 5, "get": 5, "portaudio19": 5, "dev": 5, "ffmpeg": 5, "pip": 5, "after": 5, "which": [5, 11], "chang": [5, 11], "standard": 5, "also": 5, "pleas": 5, "see": [5, 9], "chapter": 5, "second": 5, "from": [5, 7], "deploy": 5, "No": 5, "instanc": 5, "howev": [5, 9], "multi": 5, "setup": 5, "benefit": 5, "dedic": 5, "p8080": 5, "env": 5, "nvidia_disable_requir": 5, "1": 5, "all": [5, 7, 11, 12], "fractalego": [5, 7], "latest": 5, "ha": 5, "fill": 5, "ip": 5, "address": 5, "altern": 5, "clone": 5, "repositori": 5, "framework": 6, "person": [6, 7], "agent": 6, "integr": 6, "larg": 6, "speech": 6, "recognit": 6, "combin": 6, "predict": 6, "behavior": 6, "defin": [6, 9, 11], "support": 6, "memori": [6, 9], "progress": 6, "current": [6, 9], "specifi": [6, 8, 9], "while": 6, "readi": 6, "might": 6, "product": 6, "depend": 6, "softwar": 7, "under": 7, "mit": 7, "copyright": 7, "c": 7, "2024": 7, "alberto": 7, "cetoli": 7, "io": 7, "permiss": 7, "herebi": 7, "grant": 7, "free": 7, "charg": 7, "obtain": 7, "copi": 7, "associ": 7, "document": [7, 11], "deal": 7, "without": 7, "restrict": 7, "includ": 7, "limit": 7, "right": 7, "modifi": 7, "merg": 7, "publish": 7, "distribut": 7, "sublicens": 7, "sell": 7, "permit": 7, "whom": 7, "furnish": 7, "so": 7, "subject": 7, "notic": [7, 8], "shall": 7, "substanti": 7, "portion": 7, "THE": [7, 8, 9], "provid": [7, 10], "AS": 7, "warranti": 7, "OF": 7, "kind": 7, "express": 7, "OR": 7, "impli": 7, "BUT": 7, "NOT": 7, "TO": 7, "merchant": 7, "fit": 7, "FOR": 7, "particular": 7, "purpos": [7, 11], "AND": 7, "noninfring": 7, "IN": 7, "NO": 7, "event": 7, "author": 7, "holder": 7, "BE": 7, "liabl": 7, "claim": 7, "damag": 7, "other": 7, "liabil": 7, "whether": 7, "contract": 7, "tort": 7, "otherwis": 7, "aris": 7, "out": 7, "WITH": 7, "make": [8, 9], "clearer": [8, 9], "effect": [8, 9], "each": [8, 9], "suggest": [8, 9], "go": [8, 9], "math": [8, 9], "oper": [8, 9], "code": [8, 9, 10], "solv": [8, 9], "problem": [8, 9], "assign": [8, 9], "result": [8, 9, 10, 11], "variabl": [8, 9], "what": [8, 9, 10], "2": [8, 9], "anoth": [8, 9], "squar": [8, 9], "root": [8, 9], "import": [8, 9], "sqrt": [8, 9], "exactli": [8, 9, 10], "THAT": [8, 9], "when": [8, 9, 10], "request": [8, 9, 10, 11], "pi": [8, 9], "one": 8, "There": [9, 10, 11], "special": [9, 10], "tag": [9, 10], "host": 9, "everyth": 9, "between": 9, "substitut": 9, 
"valu": 9, "within": 9, "desir": 9, "date": 9, "todai": 9, "get_dat": 9, "As": 9, "long": 9, "def": 9, "return": [9, 11], "datetim": 9, "now": 9, "strftime": 9, "y": 9, "m": 9, "d": 9, "string": 9, "intermedi": 10, "final": 10, "summaris": 10, "websit": 10, "ll": 10, "content": 10, "get_websit": 10, "website_url": 10, "given": 10, "summari": 10, "check": 10, "Then": 10, "insert": 10, "prior": 10, "step": 10, "few": 11, "four": 11, "loop": 11, "wait": 11, "speak": 11, "activ": 11, "whatev": 11, "web": 11, "http": 11, "port": 11, "8889": 11, "act": 11, "line": 11, "doe": 11, "show": 12, "engin": 12, "sai": 12, "hello": 12, "repli": 12, "howdi": 12, "must": 12, "multipl": 12}, "objects": {}, "objtypes": {}, "objnames": {}, "titleterms": {"configur": 0, "exampl": [1, 8], "The": 2, "rule": [2, 8, 9, 10, 12], "yaml": 2, "file": 2, "fact": 2, "welcom": 3, "wafl": [3, 11], "": 3, "0": 3, "80": 3, "document": 3, "content": 3, "indic": 3, "tabl": 3, "initi": 4, "instal": 5, "interfac": 5, "side": 5, "llm": 5, "need": 5, "gpu": 5, "introduct": 6, "licens": 7, "execut": 9, "command": [9, 10], "local": 9, "function": 9, "rememb": 10, "run": 11, "audio": 11, "server": 11, "cli": 11, "test": 11, "simpl": 12}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 57}, "alltitles": {"Configuration": [[0, "configuration"]], "Examples": [[1, "examples"]], "The rules.yaml file": [[2, "the-rules-yaml-file"]], "Facts": [[2, "facts"]], "Rules": [[2, "rules"]], "Welcome to WAFL\u2019s 0.0.80 documentation!": [[3, "welcome-to-wafl-s-0-0-80-documentation"]], "Contents:": [[3, null]], "Indices and tables": [[3, "indices-and-tables"]], "Initialization": [[4, "initialization"]], "Installation": [[5, "installation"]], "Interface side": [[5, "interface-side"]], "LLM side (needs a GPU)": [[5, "llm-side-needs-a-gpu"]], "Introduction": [[6, "introduction"]], "License": [[7, "license"]], "Rule with examples": [[8, "rule-with-examples"]], "Rule with execute command": [[9, "rule-with-execute-command"]], "Local functions": [[9, "local-functions"]], "Rule with remember command": [[10, "rule-with-remember-command"]], "Running WAFL": [[11, "running-wafl"]], "$ wafl run-audio": [[11, "wafl-run-audio"]], "$ wafl run-server": [[11, "wafl-run-server"]], "$ wafl run-cli": [[11, "wafl-run-cli"]], "$ wafl run-tests": [[11, "wafl-run-tests"]], "Simple rule": [[12, "simple-rule"]]}, "indexentries": {}}) \ No newline at end of file +Search.setIndex({"docnames": ["actions", "configuration", "examples", "facts_and_rules", "index", "initialization", "installation", "introduction", "license", "rule_with_examples", "rules_with_execute_command", "rules_with_remember_command", "running_WAFL", "simple_rule", "testcases"], "filenames": ["actions.rst", "configuration.rst", "examples.rst", "facts_and_rules.rst", "index.rst", "initialization.rst", "installation.rst", "introduction.rst", "license.rst", "rule_with_examples.rst", "rules_with_execute_command.rst", "rules_with_remember_command.rst", "running_WAFL.rst", "simple_rule.rst", "testcases.rst"], "titles": ["Running Actions", "Configuration", "Examples", "The rules.yaml file", "Welcome to WAFL\u2019s 0.0.80 documentation!", "Initialization", "Installation", "Introduction", "License", "Rule with examples", "Rule with execute command", "Rule with remember 
command", "Running WAFL", "Simple rule", "Creating a testcase"], "terms": {"It": [0, 5, 7, 12], "i": [0, 1, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], "possibl": 0, "from": [0, 8, 14], "command": [0, 2, 4, 5, 6, 12, 14], "line": [0, 12, 14], "thi": [0, 1, 3, 5, 6, 7, 8, 10, 12, 13, 14], "us": [0, 1, 5, 6, 7, 8, 9, 10, 11, 12, 14], "carri": 0, "out": [0, 8], "convers": [0, 1, 3, 5, 14], "task": 0, "script": [0, 5], "cron": 0, "job": 0, "To": [0, 6, 14], "an": [0, 3, 5, 8, 11, 14], "follow": [0, 1, 3, 6, 7, 8, 9, 10, 11, 14], "wafl": [0, 5, 6, 7, 14], "name": [0, 1, 12, 14], "For": [0, 3, 9, 10], "exampl": [0, 3, 4, 6, 10, 12, 13], "hello": [0, 13, 14], "world": 0, "The": [0, 1, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14], "look": [0, 1], "file": [0, 1, 4, 5, 6, 8, 10, 12, 14], "yaml": [0, 1, 4, 5], "A": [0, 1, 6, 7, 8, 10, 13], "typic": [0, 1], "like": [0, 1, 3], "sai": [0, 13], "expect": [0, 14], "bot": [0, 1, 3, 9, 10, 13, 14], "output": [0, 3, 9, 10, 11], "greet": [0, 14], "again": 0, "anoth": [0, 9, 10], "each": [0, 9, 10, 14], "list": [0, 3, 9, 10, 14], "step": [0, 11], "dictionari": 0, "two": [0, 6, 10, 11], "kei": 0, "execut": [0, 2, 3, 4, 11, 12, 13], "respons": 0, "If": [0, 1], "doe": [0, 12, 14], "respond": [0, 14], "until": 0, "success": 0, "maximum": 0, "10": 0, "time": [0, 1, 3], "print": 0, "result": [0, 9, 10, 11, 12], "consol": 0, "call": [0, 3, 7], "rule": [0, 1, 2, 4, 5, 6, 7, 12], "function": [0, 1, 2, 4, 5, 7], "normal": 0, "config": [1, 5, 6, 12], "json": [1, 5, 6, 12], "contain": [1, 5, 12, 14], "some": [1, 5, 9, 10], "paramet": [1, 5], "chatbot": [1, 5, 12], "url": [1, 5, 11], "connect": [1, 5, 8], "backend": [1, 5], "waking_up_word": 1, "comput": [1, 3, 9, 10, 12], "waking_up_sound": 1, "true": 1, "deactivate_sound": 1, "py": [1, 5, 10], "llm_model": 1, "model_host": 1, "localhost": [1, 6], "model_port": 1, "8080": [1, 6], "listener_model": 1, "listener_hotword_logp": 1, "8": 1, "listener_volume_threshold": 1, "0": [1, 6], "6": 1, "listener_silence_timeout": 1, "7": 1, "speaker_model": 1, "text_embedding_model": 1, "These": [1, 3, 10], "set": [1, 3, 5, 6, 7], "regul": 1, "wake": 1, "up": 1, "system": [1, 6, 12], "run": [1, 4, 5, 6], "audio": [1, 4], "mode": [1, 12], "ar": [1, 3, 5, 10, 11, 12, 13, 14], "plai": [1, 7], "signal": 1, "back": 1, "idl": 1, "fact": [1, 4, 5], "guid": [1, 3, 5], "default": [1, 6, 12], "can": [1, 3, 5, 6, 9, 10, 11, 12, 13], "llm": [1, 4, 5], "model": [1, 3, 6, 7, 9, 10, 11], "listen": [1, 12], "detect": 1, "word": [1, 12], "similar": [1, 13], "threshold": 1, "volum": 1, "ani": [1, 8], "utter": [1, 14], "below": 1, "ignor": 1, "silenc": 1, "timeout": 1, "longer": 1, "than": 1, "consid": [1, 9, 10, 11], "finish": 1, "speaker": [1, 6], "text": [1, 6, 7, 10], "embed": [1, 6], "simpl": [2, 3, 4], "local": [2, 4, 5, 6, 12], "rememb": [2, 4], "languag": [3, 7, 9, 10, 11], "through": [3, 11], "retriev": 3, "dure": 3, "written": 3, "format": [3, 9], "do": [3, 8], "well": 3, "user": [3, 7, 9, 10, 11, 12, 13, 14], "want": [3, 9, 10, 11, 12], "know": [3, 10], "get_tim": 3, "ask": 3, "how": [3, 12, 13], "you": [3, 6, 11, 12, 13, 14], "add": 3, "its": 3, "prompt": [3, 9, 10, 11], "eventu": 3, "gener": [3, 9, 10, 11], "answer": [3, 14], "am": 3, "fine": 3, "compos": 3, "condit": [3, 8], "action": [3, 4, 8, 13], "trigger": 3, "match": [3, 13], "against": 3, "input": [3, 13], "In": [3, 6, 10, 13], "abov": [3, 8, 9, 10], "whole": 3, "ad": [3, 9, 10, 11], "item": 3, "order": [3, 6, 13], "should": [3, 9, 10], "think": [3, 9, 10], "introduct": 4, "instal": 4, "interfac": 
[4, 5], "side": [4, 5], "need": [4, 5, 11], "gpu": 4, "initi": [4, 6], "configur": 4, "server": [4, 6], "cli": 4, "test": [4, 5, 14], "creat": [4, 5, 6, 7], "testcas": [4, 5, 12], "neg": 4, "licens": 4, "index": 4, "modul": 4, "search": 4, "page": 4, "initialis": 5, "": [5, 10, 11], "work": [5, 7, 12, 13, 14], "environ": 5, "init": [5, 6], "l": 5, "db": 5, "main": 5, "requir": [5, 6, 7, 9, 10], "txt": [5, 12, 14], "secret": 5, "start_llm": 5, "sh": 5, "auxiliari": 5, "inform": 5, "about": 5, "state": 5, "edit": [5, 6], "manual": 5, "start": [5, 6, 14], "webserv": [5, 12], "python": [5, 9, 10, 11], "packag": 5, "mai": 5, "credenti": 5, "simpli": 5, "docker": [5, 6], "imag": [5, 6], "case": [5, 7, 10, 13], "version": [6, 7], "built": 6, "part": 6, "both": 6, "same": [6, 9], "machin": [6, 10], "first": 6, "your": [6, 7, 14], "have": [6, 13], "access": 6, "microphon": 6, "sudo": 6, "apt": 6, "get": 6, "portaudio19": 6, "dev": 6, "ffmpeg": 6, "pip": 6, "after": 6, "which": [6, 12], "chang": [6, 12], "standard": 6, "also": 6, "pleas": 6, "see": [6, 10], "chapter": 6, "second": 6, "speech": [6, 7], "quickli": 6, "port": [6, 12], "altern": 6, "p8080": 6, "env": 6, "nvidia_disable_requir": 6, "1": 6, "all": [6, 8, 12, 13, 14], "fractalego": [6, 8], "80": 6, "ha": 6, "fill": 6, "ip": 6, "address": 6, "final": [6, 11], "clone": 6, "repositori": 6, "http": [6, 12], "github": 6, "com": 6, "framework": 7, "person": [7, 8], "agent": 7, "integr": 7, "larg": 7, "recognit": 7, "combin": 7, "predict": 7, "behavior": 7, "defin": [7, 10, 12], "support": 7, "memori": [7, 10], "progress": 7, "current": [7, 10], "specifi": [7, 9, 10], "while": 7, "readi": 7, "might": 7, "product": 7, "depend": 7, "softwar": 8, "under": 8, "mit": 8, "copyright": 8, "c": 8, "2024": 8, "alberto": 8, "cetoli": 8, "io": 8, "permiss": 8, "herebi": 8, "grant": 8, "free": 8, "charg": 8, "obtain": 8, "copi": 8, "associ": 8, "document": [8, 12], "deal": 8, "without": 8, "restrict": 8, "includ": 8, "limit": 8, "right": 8, "modifi": 8, "merg": 8, "publish": 8, "distribut": 8, "sublicens": 8, "sell": 8, "permit": 8, "whom": 8, "furnish": 8, "so": 8, "subject": 8, "notic": [8, 9], "shall": 8, "substanti": 8, "portion": 8, "THE": [8, 9, 10], "provid": [8, 11], "AS": 8, "warranti": 8, "OF": 8, "kind": 8, "express": 8, "OR": 8, "impli": 8, "BUT": 8, "NOT": 8, "TO": 8, "merchant": 8, "fit": 8, "FOR": 8, "particular": 8, "purpos": [8, 12], "AND": 8, "noninfring": 8, "IN": 8, "NO": 8, "event": 8, "author": 8, "holder": 8, "BE": 8, "liabl": 8, "claim": 8, "damag": 8, "other": 8, "liabil": 8, "whether": 8, "contract": 8, "tort": 8, "otherwis": 8, "aris": 8, "WITH": 8, "make": [9, 10], "clearer": [9, 10], "effect": [9, 10], "suggest": [9, 10], "go": [9, 10], "math": [9, 10], "oper": [9, 10], "code": [9, 10, 11], "solv": [9, 10], "problem": [9, 10], "assign": [9, 10], "variabl": [9, 10], "what": [9, 10, 11, 14], "2": [9, 10], "squar": [9, 10], "root": [9, 10], "import": [9, 10], "sqrt": [9, 10], "exactli": [9, 10, 11], "THAT": [9, 10], "when": [9, 10, 11], "request": [9, 10, 11, 12], "pi": [9, 10], "one": 9, "There": [10, 11, 12], "special": [10, 11], "tag": [10, 11], "host": 10, "howev": 10, "everyth": 10, "between": 10, "substitut": 10, "valu": 10, "within": 10, "desir": 10, "date": 10, "todai": 10, "get_dat": 10, "As": 10, "long": 10, "def": 10, "return": [10, 12], "datetim": 10, "now": 10, "strftime": 10, "y": 10, "m": 10, "d": 10, "string": 10, "intermedi": 11, "summaris": 11, "websit": 11, "ll": 11, "content": 11, "get_websit": 11, "website_url": 
11, "given": 11, "summari": 11, "check": 11, "Then": 11, "insert": 11, "prior": 11, "few": 12, "four": 12, "loop": 12, "wait": 12, "speak": 12, "activ": 12, "whatev": 12, "web": 12, "8889": 12, "act": 12, "show": 13, "engin": 13, "repli": 13, "howdi": 13, "must": 13, "multipl": 13, "consist": 14, "titl": 14, "bob": 14, "nice": 14, "meet": 14, "pass": 14, "wai": 14, "fail": 14, "thei": 14, "certain": 14, "prefix": 14, "correct": 14, "unknown": 14}, "objects": {}, "objtypes": {}, "objnames": {}, "titleterms": {"run": [0, 12, 14], "action": 0, "configur": 1, "exampl": [2, 9], "The": 3, "rule": [3, 9, 10, 11, 13], "yaml": 3, "file": 3, "fact": 3, "welcom": 4, "wafl": [4, 12], "": 4, "0": 4, "80": 4, "document": 4, "content": 4, "indic": 4, "tabl": 4, "initi": 5, "instal": 6, "interfac": 6, "side": 6, "llm": 6, "need": 6, "gpu": 6, "introduct": 7, "licens": 8, "execut": 10, "command": [10, 11], "local": 10, "function": 10, "rememb": 11, "audio": 12, "server": 12, "cli": 12, "test": 12, "simpl": 13, "creat": 14, "testcas": 14, "neg": 14}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 57}, "alltitles": {"Running Actions": [[0, "running-actions"]], "Configuration": [[1, "configuration"]], "Examples": [[2, "examples"]], "The rules.yaml file": [[3, "the-rules-yaml-file"]], "Facts": [[3, "facts"]], "Rules": [[3, "rules"]], "Welcome to WAFL\u2019s 0.0.80 documentation!": [[4, "welcome-to-wafl-s-0-0-80-documentation"]], "Contents:": [[4, null]], "Indices and tables": [[4, "indices-and-tables"]], "Initialization": [[5, "initialization"]], "Installation": [[6, "installation"]], "Interface side": [[6, "interface-side"]], "LLM side (needs a GPU)": [[6, "llm-side-needs-a-gpu"]], "Introduction": [[7, "introduction"]], "License": [[8, "license"]], "Rule with examples": [[9, "rule-with-examples"]], "Rule with execute command": [[10, "rule-with-execute-command"]], "Local functions": [[10, "local-functions"]], "Rule with remember command": [[11, "rule-with-remember-command"]], "Running WAFL": [[12, "running-wafl"]], "$ wafl run-audio": [[12, "wafl-run-audio"]], "$ wafl run-server": [[12, "wafl-run-server"]], "$ wafl run-cli": [[12, "wafl-run-cli"]], "$ wafl run-tests": [[12, "wafl-run-tests"]], "Simple rule": [[13, "simple-rule"]], "Creating a testcase": [[14, "creating-a-testcase"]], "Running the testcases": [[14, "running-the-testcases"]], "Negative testcases": [[14, "negative-testcases"]]}, "indexentries": {}}) \ No newline at end of file diff --git a/documentation/source/actions.rst b/documentation/source/actions.rst new file mode 100644 index 00000000..d3279fc2 --- /dev/null +++ b/documentation/source/actions.rst @@ -0,0 +1,42 @@ +Running Actions +=============== + +It is possible to run actions from the command line. +This is useful to carry out conversational tasks as command line scripts or cron jobs. + +To run an action from the command line, use the following command: + +.. code-block:: bash + + $ wafl run-action + +For example, to run the ``hello-world`` action, use the following command: + +.. code-block:: bash + + $ wafl run-action hello-world + + +The ``run-action`` command will look in to the file actions.yaml. +A typical actions.yaml file looks like this: + +.. 
+.. code-block:: yaml
+
+   hello-world:
+     -
+       action: say "hello world"
+       expected: the bot outputs a greeting
+     -
+       action: say "hello world" again
+       expected: the bot outputs another greeting
+
+
+Each action is a list of steps.
+Each step is a dictionary with two keys: ``action`` and ``expected``.
+The ``action`` key is the action to be executed.
+The ``expected`` key is the expected response from the bot.
+If the bot does not respond as expected, the action is run again until it succeeds, up to a maximum of 10 times.
+
+The ``run-action`` command will run the action and print the result to the console.
+Each action will call rules and functions as in a normal conversation.
+
diff --git a/documentation/source/index.rst b/documentation/source/index.rst
index 431643d7..9f32cc73 100644
--- a/documentation/source/index.rst
+++ b/documentation/source/index.rst
@@ -17,6 +17,8 @@ Welcome to WAFL's 0.0.80 documentation!
    running_WAFL
    facts_and_rules
    examples
+   testcases
+   actions
    license
 
 Indices and tables
diff --git a/documentation/source/testcases.rst b/documentation/source/testcases.rst
new file mode 100644
index 00000000..f7be4e06
--- /dev/null
+++ b/documentation/source/testcases.rst
@@ -0,0 +1,49 @@
+Creating a testcase
+===================
+
+The file testcases.txt contains a list of testcases.
+Each testcase consists of a title and a list of utterances.
+
+.. code-block:: bash
+
+   test the greetings work
+   user: Hello
+   bot: Hello there! What is your name?
+   user: Bob
+   bot: Nice to meet you, bob!
+
+
+The title is used to name the testcase.
+Each line starting with "user:" is an utterance from the user.
+Conversely, each line starting with "bot:" is an utterance from the bot.
+The test passes if the bot's replies are consistent with the bot utterances
+in the testcase.
+The test fails if the bot responds with an utterance that is not consistent with the testcase.
+
+
+Running the testcases
+---------------------
+
+To run the testcases, run the following command:
+
+.. code-block:: bash
+
+   $ wafl run-tests
+
+This will run all the testcases in the testcases.txt file.
+
+
+Negative testcases
+------------------
+
+Negative testcases are testcases that are expected to fail.
+They are useful to test that the bot does not respond in a certain way.
+Negative testcases are prefixed with "!".
+
+.. code-block:: bash
+
+   ! test the greetings uses the correct name
+   user: Hello
+   bot: Hello there! What is your name?
+   user: Bob
+   bot: Nice to meet you, unknown!
\ No newline at end of file
diff --git a/wafl/variables.py b/wafl/variables.py
index 47a4ebed..84ebf205 100644
--- a/wafl/variables.py
+++ b/wafl/variables.py
@@ -1,4 +1,4 @@
 def get_variables():
     return {
-        "version": "0.0.81",
+        "version": "0.0.82",
     }
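To make the retry behaviour described in actions.rst concrete, the following is a minimal sketch of a runner for the ``action``/``expected`` schema shown above. It is not the wafl implementation: ``send_to_bot`` and ``matches_expectation`` are hypothetical placeholders for the chatbot call and for the check against the ``expected`` text, and retrying each step is just one reading of the "run again until successful" rule.

.. code-block:: python

   # Sketch only: drives actions that follow the action/expected schema shown
   # in actions.rst above.  send_to_bot() and matches_expectation() are
   # hypothetical placeholders, not part of wafl.
   import yaml

   MAX_ATTEMPTS = 10  # the documentation retries a failing step up to 10 times

   EXAMPLE_ACTIONS = """
   hello-world:
     -
       action: say "hello world"
       expected: the bot outputs a greeting
     -
       action: say "hello world" again
       expected: the bot outputs another greeting
   """


   def send_to_bot(text: str) -> str:
       # Placeholder for the real chatbot call.
       return "hello world"


   def matches_expectation(reply: str, expected: str) -> bool:
       # Placeholder check; a real system would judge whether the reply
       # satisfies the expectation, for instance by asking the LLM.
       return "hello" in reply.lower()


   def run_action(name: str, actions: dict) -> None:
       for step in actions[name]:
           for attempt in range(1, MAX_ATTEMPTS + 1):
               reply = send_to_bot(step["action"])
               if matches_expectation(reply, step["expected"]):
                   print(f"step ok after {attempt} attempt(s): {reply}")
                   break
           else:
               print(f"step failed after {MAX_ATTEMPTS} attempts")


   if __name__ == "__main__":
       run_action("hello-world", yaml.safe_load(EXAMPLE_ACTIONS))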
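Similarly, the testcases.txt layout described in testcases.rst can be illustrated with a small parser. The sketch below is only an illustration, not the parser wafl itself uses: it splits the file into titled testcases, records the user/bot turns in order, and flags the cases prefixed with "!" as negative.

.. code-block:: python

   # Sketch only: parses the testcases.txt layout described in testcases.rst.
   # This is an illustration, not the parser wafl itself uses.
   from dataclasses import dataclass, field
   from typing import List, Tuple


   @dataclass
   class TestCase:
       title: str
       negative: bool = False  # a "!" prefix marks a case expected to fail
       turns: List[Tuple[str, str]] = field(default_factory=list)  # (speaker, utterance)


   EXAMPLE = """\
   test the greetings work
   user: Hello
   bot: Hello there! What is your name?
   user: Bob
   bot: Nice to meet you, bob!

   ! test the greetings uses the correct name
   user: Hello
   bot: Hello there! What is your name?
   user: Bob
   bot: Nice to meet you, unknown!
   """


   def parse_testcases(text: str) -> List[TestCase]:
       cases: List[TestCase] = []
       for raw_line in text.splitlines():
           line = raw_line.strip()
           if not line:
               continue
           if line.startswith(("user:", "bot:")):
               speaker, utterance = line.split(":", 1)
               cases[-1].turns.append((speaker, utterance.strip()))
           else:
               # any other non-empty line starts a new testcase
               negative = line.startswith("!")
               cases.append(TestCase(title=line.lstrip("!").strip(), negative=negative))
       return cases


   if __name__ == "__main__":
       for case in parse_testcases(EXAMPLE):
           kind = "negative" if case.negative else "positive"
           print(f"{kind}: {case.title} ({len(case.turns)} turns)")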
