From cf64490836fecf9ac2ef95455011357535d93935 Mon Sep 17 00:00:00 2001
From: vivek
Date: Thu, 19 Oct 2023 13:54:25 -0400
Subject: [PATCH] Save LLM responses to file.

---
 GPTResponder.py | 19 +++++++++++++------
 main.py         |  9 +++++++--
 parameters.yaml |  4 ++++
 ui.py           |  6 +-----
 4 files changed, 25 insertions(+), 13 deletions(-)

diff --git a/GPTResponder.py b/GPTResponder.py
index 4d76478..006c320 100644
--- a/GPTResponder.py
+++ b/GPTResponder.py
@@ -16,7 +16,9 @@ class GPTResponder:
     """Handles all interactions with openAI LLM / ChatGPT
     """
 
-    def __init__(self, convo: conversation.Conversation):
+    def __init__(self, convo: conversation.Conversation,
+                 save_to_file: bool = False,
+                 file_name: str = 'response.txt'):
         root_logger.info(GPTResponder.__name__)
         self.response = prompts.INITIAL_RESPONSE
         self.response_interval = 2
@@ -25,8 +27,10 @@ def __init__(self, convo: conversation.Conversation):
         self.conversation = convo
         self.config = configuration.Config().data
         self.model = self.config['OpenAI']['ai_model']
+        self.save_response_to_file = save_to_file
+        self.response_file = file_name
 
-    def generate_response_from_transcript_no_check(self, transcript) -> str:
+    def generate_response_from_transcript_no_check(self) -> str:
         """Ping LLM to get a suggested response right away.
            Gets a response even if the continuous suggestion option is disabled.
            Updates the conversation object with the response from LLM.
@@ -63,18 +67,21 @@ def generate_response_from_transcript_no_check(self, transcript) -> str:
         multi_turn_response_content = multi_turn_response.choices[0].message.content
 
         try:
-            # The original way of processing the response.
-            # It causes issues when there are multiple questions in the transcript.
             # pprint.pprint(f'Multi turn response: {multi_turn_response_content}')
             processed_multi_turn_response = self.process_response(multi_turn_response_content)
             self.update_conversation(persona=constants.PERSONA_ASSISTANT,
                                      response=processed_multi_turn_response)
-            return processed_multi_turn_response
         except Exception as exception:
             root_logger.error('Error parsing response from LLM.')
             root_logger.exception(exception)
             return prompts.INITIAL_RESPONSE
 
+        if self.save_response_to_file:
+            with open(file=self.response_file, mode="a", encoding='utf-8') as f:
+                f.write(f'{datetime.datetime.now()} - {processed_multi_turn_response}\n')
+
+        return processed_multi_turn_response
+
     def process_response(self, input_str: str) -> str:
         """ Extract relevant data from LLM response.
         """
@@ -97,7 +104,7 @@ def generate_response_from_transcript(self, transcript):
         if self.gl_vars.freeze_state[0]:
             return ''
 
-        return self.generate_response_from_transcript_no_check(transcript)
+        return self.generate_response_from_transcript_no_check()
 
     def update_conversation(self, response, persona):
         root_logger.info(GPTResponder.update_conversation.__name__)
diff --git a/main.py b/main.py
index f8fd9d9..5325b77 100644
--- a/main.py
+++ b/main.py
@@ -34,6 +34,7 @@ def save_api_key(args: argparse.Namespace):
 
 
 def initiate_app_threads(global_vars: GlobalVars,
+                         config: dict,
                          model: TranscriberModels.APIWhisperTranscriber | TranscriberModels.WhisperTranscriber):
     """Start all threads required for the application"""
     # Transcribe and Respond threads, both work on the same instance of the AudioTranscriber class
@@ -48,7 +49,11 @@ def initiate_app_threads(global_vars: GlobalVars,
     transcribe_thread.daemon = True
     transcribe_thread.start()
 
-    global_vars.responder = GPTResponder(convo=global_vars.convo)
+    save_llm_response_to_file: bool = config['General']['save_llm_response_to_file']
+    llm_response_file = config['General']['llm_response_file']
+    global_vars.responder = GPTResponder(convo=global_vars.convo,
+                                         save_to_file=save_llm_response_to_file,
+                                         file_name=llm_response_file)
 
     respond_thread = threading.Thread(target=global_vars.responder.respond_to_transcriber,
                                       name='Respond',
@@ -260,7 +265,7 @@ def main():
             print('[INFO] Disabling Microphone')
             ui_cb.enable_disable_microphone(global_vars.editmenu)
 
-    initiate_app_threads(global_vars=global_vars, model=model)
+    initiate_app_threads(global_vars=global_vars, config=config, model=model)
 
     print("READY")
 
diff --git a/parameters.yaml b/parameters.yaml
index ee8b36a..cf09cd1 100644
--- a/parameters.yaml
+++ b/parameters.yaml
@@ -45,6 +45,10 @@ OpenAI:
 
 General:
   log_file: 'Transcribe.log'
+  # These two parameters are used together.
+  # Save LLM response to file if save_llm_response_to_file is Yes
+  save_llm_response_to_file: Yes  # Possible values are Yes, No
+  llm_response_file: 'response.txt'
   # Attempt transcription of the sound file after these number of seconds
   transcript_audio_duration_seconds: 3
   # These two parameters are used together.
diff --git a/ui.py b/ui.py
index d2a5a7a..46b267b 100644
--- a/ui.py
+++ b/ui.py
@@ -66,11 +66,7 @@ def update_response_ui_now(self):
         Update the Response UI with the response
         """
         # print(f'{datetime.datetime.now()}: Get the response from LLM now')
-        transcript_string = self.global_vars.transcriber.get_transcript(
-            length=constants.MAX_TRANSCRIPTION_PHRASES_FOR_LLM)
-
-        response_string = self.global_vars.responder.generate_response_from_transcript_no_check(
-            transcript_string)
+        response_string = self.global_vars.responder.generate_response_from_transcript_no_check()
         self.global_vars.response_textbox.configure(state="normal")
         write_in_textbox(self.global_vars.response_textbox, response_string)
         self.global_vars.response_textbox.configure(state="disabled")