From 4d4abed570c8b826d02c14303299a7d368083502 Mon Sep 17 00:00:00 2001
From: yumos
Date: Sat, 7 Dec 2024 17:26:56 +0800
Subject: [PATCH] fix typos in brain_defaults.py, llm_endpoint.py and utils.py

---
 core/quivr_core/brain/brain_defaults.py | 2 +-
 core/quivr_core/llm/llm_endpoint.py     | 2 +-
 core/quivr_core/rag/utils.py            | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/core/quivr_core/brain/brain_defaults.py b/core/quivr_core/brain/brain_defaults.py
index 5e613447ffab..e96a13eb2720 100644
--- a/core/quivr_core/brain/brain_defaults.py
+++ b/core/quivr_core/brain/brain_defaults.py
@@ -39,7 +39,7 @@ def default_embedder() -> Embeddings:
         return embedder
     except ImportError as e:
         raise ImportError(
-            "Please provide a valid Embedder or install quivr-core['base'] package for using the defaultone."
+            "Please provide a valid Embedder or install quivr-core['base'] package for using the default one."
         ) from e
 
 
diff --git a/core/quivr_core/llm/llm_endpoint.py b/core/quivr_core/llm/llm_endpoint.py
index d4ffcf87ac6a..26b6426b54eb 100644
--- a/core/quivr_core/llm/llm_endpoint.py
+++ b/core/quivr_core/llm/llm_endpoint.py
@@ -38,7 +38,7 @@ def __init__(self, llm_config: LLMEndpointConfig, llm: BaseChatModel):
                 self.tokenizer = AutoTokenizer.from_pretrained(llm_config.tokenizer_hub)
             except OSError:  # if we don't manage to connect to huggingface and/or no cached models are present
                 logger.warning(
-                    f"Cannot acces the configured tokenizer from {llm_config.tokenizer_hub}, using the default tokenizer {llm_config.fallback_tokenizer}"
+                    f"Cannot access the configured tokenizer from {llm_config.tokenizer_hub}, using the default tokenizer {llm_config.fallback_tokenizer}"
                 )
                 self.tokenizer = tiktoken.get_encoding(llm_config.fallback_tokenizer)
             else:
diff --git a/core/quivr_core/rag/utils.py b/core/quivr_core/rag/utils.py
index edf66a4a72bd..47c56f0c5992 100644
--- a/core/quivr_core/rag/utils.py
+++ b/core/quivr_core/rag/utils.py
@@ -28,7 +28,7 @@ def model_supports_function_calling(model_name: str):
     return model_name not in models_not_supporting_function_calls
 
 
-def format_history_to_openai_mesages(
+def format_history_to_openai_messages(
     tuple_history: List[Tuple[str, str]], system_message: str, question: str
 ) -> List[BaseMessage]:
     """Format the chat history into a list of Base Messages"""