diff --git a/.gitignore b/.gitignore
index 7bcee1b..cf86773 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,3 +16,4 @@ __pycache__/
htmlcov/
.coverage
*.swp
+.aider*
diff --git a/README.md b/README.md
index 34965ff..ebd5327 100644
--- a/README.md
+++ b/README.md
@@ -24,7 +24,7 @@ if __name__ == "__main__":
X_test = dataset['test']['X']
y_test = dataset['test']['y']
- model = ProductSlugGenerator(n_jobs=4, print_prompt=True)
+ model = ProductSlugGenerator(n_jobs=4)
before_test_accuracy = None
if os.path.exists(output_path):
diff --git a/examples/amazon/generate_slugs.py b/examples/amazon/generate_slugs.py
index f98e602..907e6e0 100644
--- a/examples/amazon/generate_slugs.py
+++ b/examples/amazon/generate_slugs.py
@@ -105,8 +105,7 @@ def evaluate_model(model, X, y):
X_test = dataset['test']['X']
y_test = dataset['test']['y']
- model = ProductSlugGenerator(n_jobs=1, print_prompt=True)
- # model.generate_slug.set_model_kwargs({'print_prompt': True})
+ model = ProductSlugGenerator(n_jobs=1)
before_test_accuracy = None
if os.path.exists(output_path):
diff --git a/langdspy/prompt_runners.py b/langdspy/prompt_runners.py
index bd95059..e9648fa 100644
--- a/langdspy/prompt_runners.py
+++ b/langdspy/prompt_runners.py
@@ -119,6 +119,7 @@ def _invoke_with_retries(self, chain, input, max_tries=1, config: Optional[Runna
llm_type, llm_model = self._get_llm_info(config)
logger.debug(f"LLM type: {llm_type} - model {llm_model}")
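+ # initialize before the retry loop so prompt_res is always defined, even if every attempt fails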
+ prompt_res = None
while max_tries >= 1:
start_time = time.time()
@@ -192,7 +193,7 @@ def _validate_output(self, parsed_output, input):
for attr_name, output_field in self.template.output_variables.items():
output_value = parsed_output.get(attr_name)
if output_value is None:
- if not getattr(output_field, 'optional', False):
+ if not output_field.kwargs.get('optional', False):
return f"Failed to get output value for non-optional field {attr_name} for prompt runner {self.template.__class__.__name__}"
else:
parsed_output[attr_name] = None
diff --git a/pyproject.toml b/pyproject.toml
index 82e69fb..b95759d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -59,8 +59,6 @@ platformdirs = "4.2.0"
pluggy = "1.4.0"
ptyprocess = "0.7.0"
pycparser = "2.21"
-pydantic = "2.6.1"
-pydantic-core = "2.16.2"
pygments = "2.17.2"
pyproject-hooks = "1.0.0"
pytest = "8.0.2"
@@ -75,7 +73,7 @@ shellingham = "1.5.4"
sniffio = "1.3.0"
tenacity = "8.2.3"
threadpoolctl = "3.3.0"
-tiktoken = "0.6.0"
+tiktoken = "^0.7.0"
tokenizers = "0.15.2"
tomlkit = "0.12.4"
tqdm = "4.66.2"
@@ -87,6 +85,11 @@ xattr = "1.1.0"
yarl = "1.9.4"
zipp = "3.17.0"
ratelimit = "^2.2.1"
+langchain = "^0.2.7"
+langchain-anthropic = "^0.1.19"
+langchain-openai = "^0.1.14"
+langchain-community = "^0.2.7"
+scikit-learn = "^1.5.1"
[tool.poetry.dev-dependencies]
diff --git a/tests/test_fake_anthropic.py b/tests/test_fake_anthropic.py
index dbecec9..98faa8f 100644
--- a/tests/test_fake_anthropic.py
+++ b/tests/test_fake_anthropic.py
@@ -1,35 +1,35 @@
import pytest
-from langdspy import PromptRunner, PromptSignature, InputField, OutputField, DefaultPromptStrategy
-from langchain_contrib.llms.testing import FakeLLM
-
-class TestPromptSignature(PromptSignature):
- input = InputField(name="input", desc="Input field")
- output = OutputField(name="output", desc="Output field")
-
-def test_fake_anthropic_llm():
- prompt_runner = PromptRunner(template_class=TestPromptSignature, prompt_strategy=DefaultPromptStrategy)
-
- # Test with default response
- llm = FakeLLM()
- result = prompt_runner.invoke({"input": "test input"}, config={"llm": llm, "llm_type": "fake_anthropic"})
+from langdspy import PromptRunner, PromptSignature, InputField, OutputField, DefaultPromptStrategy, Model
+from langchain_community.llms import FakeListLLM
+# from langchain_contrib.llms.testing import FakeLLM
+
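+# build a fresh signature/model pair per call so no state is shared between tests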
+def create_test_model():
+ class TestPromptSignature(PromptSignature):
+ input = InputField(name="input", desc="Input field")
+ output = OutputField(name="output", desc="Output field")
+
+
+ class TestModel(Model):
+ p1 = PromptRunner(template_class=TestPromptSignature, prompt_strategy=DefaultPromptStrategy)
+
+ def invoke(self, input_dict, config: dict):
+ result = self.p1.invoke(input_dict, config=config)
+ return result
+
+ return TestModel()
+
+@pytest.fixture
+def test_model():
+ return create_test_model()
+
+
+def test_fake_anthropic_llm(test_model):
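+ # FakeListLLM hands back these canned responses in order; the <output> tags are an assumed
+ # format that the Anthropic-style prompt strategy parses the `output` field from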
+ llm = FakeListLLM(verbose=True, responses=["<output>foo</output>", "<output>bar</output>", "<output>baz</output>"])
+ result = test_model.invoke({"input": "test input"}, config={"llm": llm, "llm_type": "fake_anthropic"})
assert result.output == "foo"
- # Test with custom mapped response
- llm = FakeLLM(mapped_responses={"test input": "<output>Custom response</output>"})
- result = prompt_runner.invoke({"input": "test input"}, config={"llm": llm, "llm_type": "fake_anthropic"})
- assert result.output == "Custom response"
-
- # Test with sequenced responses
- llm = FakeLLM(sequenced_responses=["<output>One</output>", "<output>Two</output>", "<output>Three</output>"])
-
- result1 = prompt_runner.invoke({"input": "test input"}, config={"llm": llm, "llm_type": "fake_anthropic"})
- assert result1.output == "One"
-
- result2 = prompt_runner.invoke({"input": "test input"}, config={"llm": llm, "llm_type": "fake_anthropic"})
- assert result2.output == "Two"
-
- result3 = prompt_runner.invoke({"input": "test input"}, config={"llm": llm, "llm_type": "fake_anthropic"})
- assert result3.output == "Three"
-
- result4 = prompt_runner.invoke({"input": "test input"}, config={"llm": llm, "llm_type": "fake_anthropic"})
- assert result4.output == "foo" # Default response after exhausting sequenced responses
+ result = test_model.invoke({"input": "test input"}, config={"llm": llm, "llm_type": "fake_anthropic"})
+ assert result.output == "bar"
+
+ result = test_model.invoke({"input": "test input"}, config={"llm": llm, "llm_type": "fake_anthropic"})
+ assert result.output == "baz"
\ No newline at end of file
diff --git a/tests/test_model_train.py b/tests/test_model_train.py
index 3a3640a..81be38e 100644
--- a/tests/test_model_train.py
+++ b/tests/test_model_train.py
@@ -70,7 +70,7 @@ def slug_similarity(X, true_slugs, predicted_slugs):
@pytest.fixture
def model():
- return ProductSlugGenerator(n_jobs=1, print_prompt=False)
+ return ProductSlugGenerator(n_jobs=1)
@pytest.fixture
def llm():
diff --git a/tests/test_prompt_runner.py b/tests/test_prompt_runner.py
index d0c3f3e..37a775c 100644
--- a/tests/test_prompt_runner.py
+++ b/tests/test_prompt_runner.py
@@ -8,7 +8,7 @@
import pytest
from unittest.mock import MagicMock
from langchain.chains import LLMChain
-from langdspy import PromptRunner, DefaultPromptStrategy, InputField, OutputField, Model, PromptSignature, Prediction
+from langdspy import PromptRunner, DefaultPromptStrategy, InputField, OutputField, Model, PromptSignature, Prediction, PromptStrategy
class TestPromptSignature(PromptSignature):
input = InputField(name="input", desc="Input field")
@@ -25,21 +25,6 @@ def invoke(self, input_dict, config):
from unittest.mock import patch
-# def test_print_prompt_in_config():
-# model = TestModel(n_jobs=1, print_prompt=True)
-
-# input_dict = {"input": "Test input"}
-# mock_invoke = MagicMock(return_value=Prediction(**{**input_dict, "output": "Test output"}))
-
-# # with patch.object(TestModel, 'invoke', new=mock_invoke):
-# config = {"llm": mock_invoke}
-# result = model.invoke(input_dict, config)
-# print(result)
-
-# mock_invoke.assert_called_once_with(input_dict, config)
-# assert "print_prompt" in config
-# assert config["print_prompt"] == True
-# assert result.output == "Test output"
from langchain.chat_models.base import BaseChatModel
@@ -53,34 +38,19 @@ def _generate(self, *args, **kwargs):
def _llm_type(self) -> str:
return "test"
-def test_print_prompt_in_inputs():
- model = TestModel(n_jobs=1, print_prompt="TEST")
- input_dict = {"input": "Test input"}
- mock_invoke = MagicMock(return_value="FORMATTED PROMPT")
-
- with patch.object(DefaultPromptStrategy, 'format_prompt', new=mock_invoke):
- config = {"llm": TestLLM(), "llm_type": "test"}
- result = model.invoke(input_dict, config=config)
-
- print(result)
- print(f"Called with {mock_invoke.call_count} {mock_invoke.call_args_list} {mock_invoke.call_args}")
- call_args = {**input_dict, 'print_prompt': "TEST", 'trained_state': model.trained_state, 'use_training': True, 'llm_type': "test"}
- print(f"Expecting call {call_args}")
- mock_invoke.assert_called_with(**call_args)
-
def test_trained_state_in_inputs():
model = TestModel(n_jobs=1)
input_dict = {"input": "Test input"}
mock_invoke = MagicMock(return_value="FORMATTED PROMPT")
- with patch.object(DefaultPromptStrategy, 'format_prompt', new=mock_invoke):
+ with patch.object(PromptStrategy, 'format_prompt', new=mock_invoke):
config = {"llm": TestLLM(), "llm_type": "test"}
model.trained_state.examples = [("EXAMPLE_X", "EXAMPLE_Y")]
result = model.invoke(input_dict, config=config)
print(result)
print(f"Called with {mock_invoke.call_count} {mock_invoke.call_args_list} {mock_invoke.call_args}")
- call_args = {**input_dict, 'print_prompt': "TEST", 'trained_state': model.trained_state, 'use_training': True, 'llm_type': "test"}
+ call_args = {**input_dict, 'trained_state': model.trained_state, 'use_training': True, 'llm_type': "test"}
print(f"Expecting call {call_args}")
mock_invoke.assert_called_with(**call_args)
@@ -96,6 +66,6 @@ def test_use_training():
print(result)
print(f"Called with {mock_invoke.call_count} {mock_invoke.call_args_list} {mock_invoke.call_args}")
- call_args = {**input_dict, 'print_prompt': "TEST", 'trained_state': model.trained_state, 'use_training': False, 'llm_type': "test"}
+ call_args = {**input_dict, 'trained_state': model.trained_state, 'use_training': False, 'llm_type': "test"}
print(f"Expecting call {call_args}")
mock_invoke.assert_called_with(**call_args)
\ No newline at end of file