diff --git a/src/vulcanai/console/console.py b/src/vulcanai/console/console.py index dcd5afd..064ceab 100644 --- a/src/vulcanai/console/console.py +++ b/src/vulcanai/console/console.py @@ -16,10 +16,12 @@ import argparse import asyncio +import os import sys import threading import pyperclip # To paste the clipboard into the terminal +from textual import constants as textual_constants from textual import events, work from textual.app import App, ComposeResult from textual.binding import Binding @@ -89,6 +91,7 @@ class VulcanConsole(App): border: tall #333333; scrollbar-size-vertical: 0; scrollbar-size-horizontal: 0; + color: #ffffff; } #llm_spinner { @@ -100,12 +103,14 @@ class VulcanConsole(App): #cmd { dock: bottom; + color: #ffffff; } #history_title { content-align: center middle; margin: 0; padding: 0; + color: #ffffff; } #history_scroll { @@ -118,6 +123,10 @@ class VulcanConsole(App): #history { width: 100%; } + + #variables { + color: #ffffff; + } """ # Bindings for the console @@ -140,6 +149,13 @@ def __init__( user_context: str = "", main_node=None, ): + # Force truecolor so Textual renders the same colors inside a Docker container + os.environ.setdefault("COLORTERM", "truecolor") + textual_constants.COLOR_SYSTEM = "truecolor" + + # Keep Hugging Face download progress bars out of redirected Textual stdout/stderr. + os.environ.setdefault("HF_HUB_DISABLE_PROGRESS_BARS", "1") + super().__init__() # Textual lib # -- Main variables -- diff --git a/src/vulcanai/tools/embedder.py b/src/vulcanai/tools/embedder.py index d8c4e2c..65fe764 100644 --- a/src/vulcanai/tools/embedder.py +++ b/src/vulcanai/tools/embedder.py @@ -12,12 +12,25 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Optional + import numpy as np from sentence_transformers import SentenceTransformer +from vulcanai.console.logger import VulcanAILogger + + +def info_msg_hf_model_loading(model_name: str, logger: Optional[VulcanAILogger] = None) -> None: + msg = f"Hugging Face is loading '{model_name}'. If the model is not cached yet, files are being downloaded..." + if logger is not None: + logger.log_console(msg) + else: + VulcanAILogger.default().log_console(msg) + class SBERTEmbedder: - def __init__(self, model_name="all-MiniLM-L6-v2"): + def __init__(self, model_name="all-MiniLM-L6-v2", logger: Optional[VulcanAILogger] = None): + info_msg_hf_model_loading(model_name, logger) self.model = SentenceTransformer(model_name) def embed(self, text: str) -> np.ndarray: diff --git a/src/vulcanai/tools/tool_registry.py b/src/vulcanai/tools/tool_registry.py index 686c8af..901f442 100644 --- a/src/vulcanai/tools/tool_registry.py +++ b/src/vulcanai/tools/tool_registry.py @@ -86,7 +86,7 @@ def __init__(self, embedder=None, logger=None): # Dictionary of deactivated_tools (name -> tool instance) self.deactivated_tools: Dict[str, ITool] = {} # Embedding model for tool metadata - self.embedder = embedder or SBERTEmbedder() + self.embedder = embedder or SBERTEmbedder(logger=self.logger) # Simple in-memory index of (name, embedding) self._index: list[Tuple[str, np.ndarray]] = [] # List of modules where tools can be loaded from