
Commit e397265: Better messaging

1 parent 88a0c6b

File tree: 5 files changed (+13, −12 lines)

interpreter/llm/setup_local_text_llm.py

Lines changed: 2 additions & 0 deletions
```diff
@@ -12,6 +12,8 @@ def setup_local_text_llm(interpreter):
 
     repo_id = interpreter.model.replace("huggingface/", "")
 
+    display_markdown_message(f"> **Warning**: Local LLM usage is an experimental, unstable feature.")
+
     if repo_id != "TheBloke/Mistral-7B-Instruct-v0.1-GGUF":
         # ^ This means it was prob through the old --local, so we have already displayed this message.
         # Hacky. Not happy with this
```
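
To see the effect from the outside, here is a runnable sketch of the new flow. `display_markdown_message` and the interpreter object are stubbed; the names below are stand-ins, not open-interpreter's real internals.

```python
def display_markdown_message(message: str) -> None:
    # Stub for open-interpreter's renderer, which pretty-prints markdown.
    print(message)

class StubInterpreter:
    # Hypothetical minimal stand-in for the real interpreter object.
    model = "huggingface/TheBloke/CodeLlama-7B-GGUF"

def setup_local_text_llm(interpreter):
    repo_id = interpreter.model.replace("huggingface/", "")
    # After this commit, the warning fires for every local model,
    # default Mistral included, before any repo-specific branching.
    display_markdown_message("> **Warning**: Local LLM usage is an experimental, unstable feature.")
    return repo_id

setup_local_text_llm(StubInterpreter())
# prints: > **Warning**: Local LLM usage is an experimental, unstable feature.
```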

interpreter/llm/setup_text_llm.py

Lines changed: 3 additions & 4 deletions
```diff
@@ -49,12 +49,11 @@ def setup_text_llm(interpreter):
 
         display_markdown_message(f"""
         > Failed to install `{interpreter.model}`.
-        \n\n**Common Fixes:** You can follow our simple setup docs at the link below to resolve common errors.\n\n> `https://github.com/KillianLucas/open-interpreter/tree/main/docs`
-        \n\n**If you've tried that and you're still getting an error, we have likely not built the proper `{interpreter.model}` support for your system.**
-        \n\n*( Running language models locally is a difficult task!* If you have insight into the best way to implement this across platforms/architectures, please join the Open Interpreter community Discord and consider contributing the project's development.
+        \n\n**We have likely not built the proper `{interpreter.model}` support for your system.**
+        \n\n(*Running language models locally is a difficult task!* If you have insight into the best way to implement this across platforms/architectures, please join the `Open Interpreter` community Discord, or the `Oobabooga` community Discord, and consider contributing the development of these projects.)
         """)
 
-        raise Exception("Architecture not yet supported for local LLM inference. Please run `interpreter` to connect to a cloud model, then try `--local` again in a few days. Scroll up for more tips.")
+        raise Exception("Architecture not yet supported for local LLM inference via `Oobabooga`. Please run `interpreter` to connect to a cloud model.")
 
     # Pass remaining parameters to LiteLLM
     def base_llm(messages):
```
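
A sketch of the simplified failure path: rather than walking users through docs-based fixes, the handler now states the likely cause and raises immediately. The installer below is a stand-in that always fails, purely to exercise the branch; it is not the real `ooba` API.

```python
def install_local_model(model: str) -> None:
    # Stand-in for the ooba-based installer; assume it failed on this platform.
    raise RuntimeError("no prebuilt binary for this platform")

def setup_text_llm(model: str):
    try:
        install_local_model(model)
    except Exception:
        print(f"> Failed to install `{model}`.")
        raise Exception(
            "Architecture not yet supported for local LLM inference via `Oobabooga`. "
            "Please run `interpreter` to connect to a cloud model."
        )

# setup_text_llm("huggingface/TheBloke/Mistral-7B-Instruct-v0.1-GGUF")
# -> prints the failure message, then raises the new, shorter Exception.
```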

interpreter/terminal_interface/validate_llm_settings.py

Lines changed: 3 additions & 3 deletions
```diff
@@ -21,8 +21,7 @@ def validate_llm_settings(interpreter):
         # Interactive prompt to download the best local model we know of
 
         display_markdown_message("""
-        **Open Interpreter** will use `Mistral 7B` for local execution (experimental).
-        """)
+        **Open Interpreter** will use `Mistral 7B` for local execution.""")
 
         if interpreter.gguf_quality == None:
             interpreter.gguf_quality = 0.35
@@ -102,7 +101,8 @@ def validate_llm_settings(interpreter):
     # If we're here, we passed all the checks.
 
     # Auto-run is for fast, light useage -- no messages.
-    if not interpreter.auto_run:
+    # If mistral, we've already displayed a message.
+    if not interpreter.auto_run and "mistral" not in interpreter.model.lower():
         display_markdown_message(f"> Model set to `{interpreter.model.upper()}`")
     return
 
```
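
The new banner condition is easy to verify in isolation. The helper below is hypothetical, extracted only to show which models the change silences.

```python
def should_show_model_banner(auto_run: bool, model: str) -> bool:
    # Mirrors the new condition: stay quiet in auto-run mode, and skip the
    # banner for any Mistral variant, which already got its own message above.
    return not auto_run and "mistral" not in model.lower()

assert should_show_model_banner(False, "gpt-4")
assert not should_show_model_banner(False, "huggingface/TheBloke/Mistral-7B-Instruct-v0.1-GGUF")
assert not should_show_model_banner(True, "gpt-4")  # auto-run shows no messages
```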

poetry.lock

Lines changed: 4 additions & 4 deletions
Generated file; diff not rendered by default.

pyproject.toml

Lines changed: 1 addition & 1 deletion
```diff
@@ -29,7 +29,7 @@ litellm = "^0.1.590"
 pyyaml = "^6.0.1"
 semgrep = "^1.41.0"
 yaspin = "^3.0.1"
-ooba = "^0.0.18"
+ooba = "*"
 [tool.poetry.dependencies.pyreadline3]
 version = "^3.4.1"
 markers = "sys_platform == 'win32'"
```
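
Loosening `ooba` from `^0.0.18` to `"*"` tells Poetry to accept any published version of the package, so `poetry update ooba` can always pull the latest release of this fast-moving dependency; the exact version actually installed remains pinned by the accompanying `poetry.lock` change.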
