Skip to content

Commit

Permalink
Merge pull request #98 from fractalego/prompt-as-list
Browse files Browse the repository at this point in the history
Prompt as list
  • Loading branch information
fractalego committed Jun 15, 2024
2 parents 7c1bf55 + d09d29d commit eb8e946
Show file tree
Hide file tree
Showing 14 changed files with 272 additions and 179 deletions.
16 changes: 16 additions & 0 deletions todo.txt
Original file line number Diff line number Diff line change
@@ -1,3 +1,19 @@
wafl:
- create indices

training:
- retrain phi3
- add tokens <execute> and <remember> to the training data
- add some prior conversation to the training data, taken from other examples
- add more unused rules in the prompt



* after a rule is deleted, you should also prune the conversation above.
  The system can get confused if the conversation becomes too long;
  re-train the system with prior conversations before calling the rule.


* substitute utterances in base_interface with the conversation class

* add config file for model names
Expand Down
140 changes: 140 additions & 0 deletions wafl/answerer/answerer_implementation.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,140 @@
import re
import traceback

from typing import List, Tuple

from wafl.exceptions import CloseConversation
from wafl.facts import Fact
from wafl.interface.conversation import Conversation, Utterance


def is_executable(text: str) -> bool:
    """Return True when *text* contains an <execute> tag."""
    return text.count("<execute>") > 0


def create_one_liner(query_text):
    """Wrap a single user query into a one-utterance Conversation."""
    single_utterance = Utterance(query_text, "user")
    return Conversation([single_utterance])


async def substitute_memory_in_answer_and_get_memories_if_present(
    answer_text: str,
) -> Tuple[str, List[str]]:
    """Extract <remember>...</remember> spans from an answer.

    Each remembered span is replaced in the answer text with the placeholder
    "[Output in memory]" and collected so the caller can store it. Both
    closed tags (<remember>...</remember>) and unclosed trailing tags
    (<remember>... running to end of line) are handled.

    Args:
        answer_text: the raw model answer, possibly containing tags.

    Returns:
        Tuple of (answer text with placeholders substituted, list of the
        extracted memory strings).
    """
    memories: List[str] = []

    # First pass: closed <remember>...</remember> tags. The alternation also
    # matches unclosed tags (group 2); those are skipped here and handled by
    # the second pass after <br> normalization.
    matches = re.finditer(
        r"<remember>(.*?)</remember>|<remember>(.*?)$",
        answer_text,
        re.DOTALL | re.MULTILINE,
    )
    for match in matches:
        to_substitute = match.group(1)
        if not to_substitute:
            continue
        answer_text = answer_text.replace(match.group(0), "[Output in memory]")
        memories.append(to_substitute)

    answer_text = answer_text.replace("<br>", "\n")

    # Second pass: unclosed trailing <remember> tags.
    # BUG FIX: the memories list is no longer re-initialized here, so the
    # memories extracted by the first pass are preserved in the return value.
    matches = re.finditer(r"<remember>(.*?)$", answer_text, re.DOTALL | re.MULTILINE)
    for match in matches:
        to_substitute = match.group(1)
        if not to_substitute:
            continue
        answer_text = answer_text.replace(match.group(0), "[Output in memory]")
        memories.append(to_substitute)

    return answer_text, memories


async def execute_results_in_answer(answer_text: str, module, functions) -> str:
    """Run every <execute>...</execute> span in the answer and splice in the result.

    Closed tags are handled first; a second pass picks up an unclosed
    trailing <execute> tag ending in a call (closing parenthesis at end of
    line). The executed output replaces the tag in the returned text.
    """
    # Pass 1: closed tags. The alternation also matches unclosed tags
    # (group 2), which are ignored here and handled by the second pass.
    for found in re.finditer(
        r"<execute>(.*?)</execute>|<execute>(.*?\))$",
        answer_text,
        re.DOTALL | re.MULTILINE,
    ):
        code = found.group(1)
        if code:
            output = await _run_code(code, module, functions)
            answer_text = answer_text.replace(found.group(0), output)

    # Pass 2: an unclosed trailing <execute> tag ending with a call.
    for found in re.finditer(
        r"<execute>(.*?\))$", answer_text, re.DOTALL | re.MULTILINE
    ):
        code = found.group(1)
        if code:
            output = await _run_code(code, module, functions)
            answer_text = answer_text.replace(found.group(0), output)

    return answer_text


async def _run_code(to_execute: str, module, functions) -> str:
    """Execute a model-generated Python snippet and return its result as a string.

    If the snippet appears to call one of the known *functions*, it is
    evaluated as an attribute access on *module*; otherwise it is exec'd and
    is expected to bind a ``result`` variable. Up to three attempts are made,
    auto-importing any module a NameError reports as missing.

    NOTE(review): eval/exec here runs model-generated text, which is
    inherently unsafe if the text can be influenced by untrusted input --
    flagging rather than changing behavior.
    """
    result = None
    # Retry loop: a NameError attempt prepends the missing import and tries
    # the snippet again (at most three attempts total).
    for _ in range(3):
        try:
            if any(item + "(" in to_execute for item in functions):
                # The snippet calls a known function: evaluate it against the
                # caller-supplied module object.
                result = eval(f"module.{to_execute}")
                break

            else:
                # Free-form code: exec it and read back a `result` binding,
                # if the snippet created one.
                ldict = {}
                exec(to_execute, globals(), ldict)
                if "result" in ldict:
                    result = str(ldict["result"])
                    break

        except NameError as e:
            # e.g. "'datetime' is not defined" -> prepend the import and retry.
            match = re.search(r"\'(\w+)\' is not defined", str(e))
            if match:
                to_import = match.group(1)
                to_execute = f"import {to_import}\n{to_execute}"

        except CloseConversation as e:
            # Deliberate control-flow exception: propagate so the caller can
            # end the conversation.
            raise e

        except Exception as e:
            # Any other failure: report the error back inline as the result.
            result = (
                f"Error while executing\n\n```python\n{to_execute}\n```\n\n{str(e)}"
            )
            traceback.print_exc()
            break

    # NOTE(review): falsy results (e.g. empty string or None) fall back to
    # echoing the code block itself -- presumably intended; confirm.
    if not result:
        result = f"\n```python\n{to_execute}\n```"

    return result


def get_text_from_facts_and_thresholds(
    facts_and_thresholds: List[Tuple[Fact, float]], memory: str
) -> List[str]:
    """Collect the text of each fact whose text is not already in *memory*."""
    texts = []
    for fact, _threshold in facts_and_thresholds:
        if fact.text not in memory:
            texts.append(fact.text)
    return texts


def add_dummy_utterances_to_continue_generation(
    conversation: Conversation, answer_text: str
):
    """Append the bot's partial answer and a user "Continue" prompt.

    This nudges the model to keep generating from where it stopped.
    """
    for text, speaker in ((answer_text, "bot"), ("Continue", "user")):
        conversation.add_utterance(Utterance(text, speaker))


def add_memories_to_facts(facts: str, memories: List[str]) -> str:
    """Return the facts string with the memory lines appended, one per line."""
    joined_memories = "\n".join(memories)
    return "\n".join([facts, joined_memories])
Loading

0 comments on commit eb8e946

Please sign in to comment.