diff --git a/recipes/llm-voice-assistant/python/cli/main.py b/recipes/llm-voice-assistant/python/cli/main.py index 4be536e..0eb737d 100644 --- a/recipes/llm-voice-assistant/python/cli/main.py +++ b/recipes/llm-voice-assistant/python/cli/main.py @@ -203,7 +203,7 @@ def flush(self): def interrupt(self): self.orca_connection.send({'command': Commands.INTERRUPT}) while self.orca_connection.poll() and self.orca_connection.recv()['command'] != Commands.INTERRUPT: - time.sleep(0.01) + time.sleep(0.1) self.speaker.interrupt() def tick(self): @@ -248,6 +248,7 @@ def handler(_, __) -> None: text_queue = Queue() while not close: + time.sleep(0.1) while connection.poll(): message = connection.recv() if message['command'] == Commands.CLOSE: close = True @@ -269,7 +270,7 @@ def handler(_, __) -> None: orca_profiler.reset() utterance_end_sec = 0 delay_sec = -1 - if not text_queue.empty(): + while not text_queue.empty(): text = text_queue.get() orca_profiler.tick() pcm = orca_stream.synthesize(text) @@ -321,7 +322,7 @@ def process(self, text: str, utterance_end_sec): def interrupt(self): self.pllm_connection.send({'command': Commands.INTERRUPT}) while self.pllm_connection.poll() and self.pllm_connection.recv()['command'] != Commands.INTERRUPT: - time.sleep(0.01) + time.sleep(0.1) print('', flush=True) self.synthesizer.interrupt() @@ -406,6 +407,7 @@ def llm_task(text): llm_future = None interrupting = False while not close: + time.sleep(0.1) while connection.poll(): message = connection.recv() if message['command'] == Commands.CLOSE: @@ -434,7 +436,7 @@ def llm_task(text): connection.send({'command': Commands.INTERRUPT}) finally: while llm_future and llm_future.done(): - time.sleep(0.01) + time.sleep(0.1) del executor pllm.release() diff --git a/recipes/llm-voice-assistant/python/windows_gui/main.py b/recipes/llm-voice-assistant/python/windows_gui/main.py index 903a221..4eaed84 100644 --- a/recipes/llm-voice-assistant/python/windows_gui/main.py +++ 
b/recipes/llm-voice-assistant/python/windows_gui/main.py @@ -206,6 +206,7 @@ def handler(_, __) -> None: flushing = False text_queue = Queue() while not close: + time.sleep(0.1) while connection.poll(): message = connection.recv() if message['command'] == Commands.CLOSE: @@ -350,6 +351,7 @@ def llm_task(text): llm_future = None interrupting = False while not close: + time.sleep(0.1) while connection.poll(): message = connection.recv() if message['command'] == Commands.CLOSE: @@ -745,6 +747,9 @@ def handler(_, __) -> None: pass signal.signal(signal.SIGINT, handler) + if not sys.platform.lower().startswith('win'): + return + try: gpu_usage_counters_format = r'"\GPU Engine(pid_{}_*)\Utilization Percentage"' gpu_usage_counters = ', '.join([gpu_usage_counters_format.format(pid) for pid in pids]) @@ -856,10 +861,6 @@ def handler(_, __) -> None: if __name__ == '__main__': - if not sys.platform.lower().startswith('win'): - print('Error: Only runs on Windows platforms') - exit(1) - parser = ArgumentParser() parser.add_argument( '--config',