Save LLM Responses to a text file #81

Merged · 1 commit · Oct 19, 2023
19 changes: 13 additions & 6 deletions GPTResponder.py
@@ -16,7 +16,9 @@
 class GPTResponder:
     """Handles all interactions with openAI LLM / ChatGPT
     """
-    def __init__(self, convo: conversation.Conversation):
+    def __init__(self, convo: conversation.Conversation,
+                 save_to_file: bool = False,
+                 file_name: str = 'response.txt'):
         root_logger.info(GPTResponder.__name__)
         self.response = prompts.INITIAL_RESPONSE
         self.response_interval = 2
@@ -25,8 +27,10 @@ def __init__(self, convo: conversation.Conversation):
         self.conversation = convo
         self.config = configuration.Config().data
         self.model = self.config['OpenAI']['ai_model']
+        self.save_response_to_file = save_to_file
+        self.response_file = file_name

-    def generate_response_from_transcript_no_check(self, transcript) -> str:
+    def generate_response_from_transcript_no_check(self) -> str:
         """Ping LLM to get a suggested response right away.
         Gets a response even if the continuous suggestion option is disabled.
         Updates the conversation object with the response from LLM.
@@ -63,18 +67,21 @@ def generate_response_from_transcript_no_check(self, transcript) -> str:

         multi_turn_response_content = multi_turn_response.choices[0].message.content
         try:
             # The original way of processing the response.
             # It causes issues when there are multiple questions in the transcript.
             # pprint.pprint(f'Multi turn response: {multi_turn_response_content}')
             processed_multi_turn_response = self.process_response(multi_turn_response_content)
             self.update_conversation(persona=constants.PERSONA_ASSISTANT,
                                      response=processed_multi_turn_response)
-            return processed_multi_turn_response
         except Exception as exception:
             root_logger.error('Error parsing response from LLM.')
             root_logger.exception(exception)
             return prompts.INITIAL_RESPONSE

+        if self.save_response_to_file:
+            with open(file=self.response_file, mode="a", encoding='utf-8') as f:
+                f.write(f'{datetime.datetime.now()} - {processed_multi_turn_response}\n')
+
+        return processed_multi_turn_response

     def process_response(self, input_str: str) -> str:
         """ Extract relevant data from LLM response.
         """
@@ -97,7 +104,7 @@ def generate_response_from_transcript(self, transcript):
         if self.gl_vars.freeze_state[0]:
             return ''

-        return self.generate_response_from_transcript_no_check(transcript)
+        return self.generate_response_from_transcript_no_check()

     def update_conversation(self, response, persona):
         root_logger.info(GPTResponder.update_conversation.__name__)
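
For anyone who wants to try the new save path in isolation, here is a minimal, self-contained sketch of the append-and-timestamp pattern the diff introduces. The standalone `append_response` helper is illustrative only; in the PR the logic lives inside `generate_response_from_transcript_no_check`.

```python
import datetime

def append_response(response: str, file_name: str = 'response.txt') -> None:
    """Append a timestamped LLM response, mirroring the diff's write format."""
    # Mode "a" creates the file if it does not exist and appends otherwise,
    # so no existence check is needed; encoding matches the diff.
    with open(file=file_name, mode="a", encoding='utf-8') as f:
        f.write(f'{datetime.datetime.now()} - {response}\n')

append_response('Sample suggested reply')
# response.txt now ends with a line like:
# 2023-10-19 14:03:22.123456 - Sample suggested reply
```

One consequence of append mode worth noting: the file grows across sessions, since nothing in the diff truncates or rotates it.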
9 changes: 7 additions & 2 deletions main.py
@@ -34,6 +34,7 @@ def save_api_key(args: argparse.Namespace):


 def initiate_app_threads(global_vars: GlobalVars,
+                         config: dict,
                          model: TranscriberModels.APIWhisperTranscriber | TranscriberModels.WhisperTranscriber):
     """Start all threads required for the application"""
     # Transcribe and Respond threads, both work on the same instance of the AudioTranscriber class
@@ -48,7 +49,11 @@ def initiate_app_threads(global_vars: GlobalVars,
     transcribe_thread.daemon = True
     transcribe_thread.start()

-    global_vars.responder = GPTResponder(convo=global_vars.convo)
+    save_llm_response_to_file: bool = config['General']['save_llm_response_to_file']
+    llm_response_file = config['General']['llm_response_file']
+    global_vars.responder = GPTResponder(convo=global_vars.convo,
+                                         save_to_file=save_llm_response_to_file,
+                                         file_name=llm_response_file)

     respond_thread = threading.Thread(target=global_vars.responder.respond_to_transcriber,
                                       name='Respond',
@@ -260,7 +265,7 @@ def main():
     print('[INFO] Disabling Microphone')
     ui_cb.enable_disable_microphone(global_vars.editmenu)

-    initiate_app_threads(global_vars=global_vars, model=model)
+    initiate_app_threads(global_vars=global_vars, config=config, model=model)

     print("READY")

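
The `main()` change just threads the parsed config through to the thread setup. A runnable sketch of that hand-off with stand-in names (`initiate_app_threads_stub` and the inline dict are hypothetical, not the project's real signatures):

```python
def initiate_app_threads_stub(global_vars: object, config: dict, model: object) -> None:
    # Mirrors the diff: read the two General settings, then hand them
    # to the responder as save_to_file / file_name keyword arguments.
    save_llm_response_to_file: bool = config['General']['save_llm_response_to_file']
    llm_response_file = config['General']['llm_response_file']
    print(f'save_to_file={save_llm_response_to_file}, file_name={llm_response_file}')

config = {'General': {'save_llm_response_to_file': True,
                      'llm_response_file': 'response.txt'}}
initiate_app_threads_stub(global_vars=None, config=config, model=None)
```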
4 changes: 4 additions & 0 deletions parameters.yaml
@@ -45,6 +45,10 @@ OpenAI:

 General:
   log_file: 'Transcribe.log'
+  # These two parameters are used together.
+  # Save LLM response to file if save_llm_response_to_file is Yes
+  save_llm_response_to_file: Yes  # Possible values are Yes, No
+  llm_response_file: 'response.txt'
   # Attempt transcription of the sound file after these number of seconds
   transcript_audio_duration_seconds: 3
   # These two parameters are used together.
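
A note on the `Yes` literal: with a YAML 1.1 loader such as PyYAML (assumed here; the project's actual loader may differ), unquoted `Yes`/`No` parse directly to Python booleans, so `save_llm_response_to_file` arrives as a `bool` and the `: bool` annotation in main.py needs no further conversion. A quick check:

```python
import yaml  # PyYAML, assumed: pip install pyyaml

snippet = """
General:
  save_llm_response_to_file: Yes   # unquoted Yes/No are YAML 1.1 booleans
  llm_response_file: 'response.txt'
"""
general = yaml.safe_load(snippet)['General']
print(general['save_llm_response_to_file'])   # True (a bool, not the string 'Yes')
print(general['llm_response_file'])           # response.txt
```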
6 changes: 1 addition & 5 deletions ui.py
@@ -66,11 +66,7 @@ def update_response_ui_now(self):
         Update the Response UI with the response
         """
         # print(f'{datetime.datetime.now()}: Get the response from LLM now')
-        transcript_string = self.global_vars.transcriber.get_transcript(
-            length=constants.MAX_TRANSCRIPTION_PHRASES_FOR_LLM)
-
-        response_string = self.global_vars.responder.generate_response_from_transcript_no_check(
-            transcript_string)
+        response_string = self.global_vars.responder.generate_response_from_transcript_no_check()
         self.global_vars.response_textbox.configure(state="normal")
         write_in_textbox(self.global_vars.response_textbox, response_string)
         self.global_vars.response_textbox.configure(state="disabled")