
Commit

All tests pass
aelaguiz committed Jul 10, 2024
1 parent cf3cea0 commit c92959c
Showing 8 changed files with 48 additions and 74 deletions.

.gitignore (1 change: 1 addition & 0 deletions)

@@ -16,3 +16,4 @@ __pycache__/
 htmlcov/
 .coverage
 *.swp
+.aider*

README.md (2 changes: 1 addition & 1 deletion)

@@ -24,7 +24,7 @@ if __name__ == "__main__":
     X_test = dataset['test']['X']
     y_test = dataset['test']['y']
 
-    model = ProductSlugGenerator(n_jobs=4, print_prompt=True)
+    model = ProductSlugGenerator(n_jobs=4)
 
     before_test_accuracy = None
     if os.path.exists(output_path):

examples/amazon/generate_slugs.py (3 changes: 1 addition & 2 deletions)

@@ -105,8 +105,7 @@ def evaluate_model(model, X, y):
     X_test = dataset['test']['X']
     y_test = dataset['test']['y']
 
-    model = ProductSlugGenerator(n_jobs=1, print_prompt=True)
-    # model.generate_slug.set_model_kwargs({'print_prompt': True})
+    model = ProductSlugGenerator(n_jobs=1)
 
     before_test_accuracy = None
     if os.path.exists(output_path):

langdspy/prompt_runners.py (3 changes: 2 additions & 1 deletion)

@@ -119,6 +119,7 @@ def _invoke_with_retries(self, chain, input, max_tries=1, config: Optional[Runna
         llm_type, llm_model = self._get_llm_info(config)
 
         logger.debug(f"LLM type: {llm_type} - model {llm_model}")
+        prompt_res = None
 
         while max_tries >= 1:
             start_time = time.time()

@@ -192,7 +193,7 @@ def _validate_output(self, parsed_output, input):
         for attr_name, output_field in self.template.output_variables.items():
             output_value = parsed_output.get(attr_name)
             if output_value is None:
-                if not getattr(output_field, 'optional', False):
+                if not output_field.kwargs['optional']:
                     return f"Failed to get output value for non-optional field {attr_name} for prompt runner {self.template.__class__.__name__}"
                 else:
                     parsed_output[attr_name] = None
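
A note on the prompt_res = None line added above: initializing the variable before the retry loop means code after the loop can reference it even when no attempt ever assigns it (for example, if every try raises), avoiding an UnboundLocalError. A minimal sketch of that retry pattern, with illustrative names rather than langdspy's actual internals:

def invoke_with_retries(run_once, max_tries=3):
    # Pre-initialize so code after the loop never touches an unbound name,
    # which is what the prompt_res = None line in the hunk above guards against.
    prompt_res = None
    last_error = None
    while max_tries >= 1:
        try:
            prompt_res = run_once()
            if prompt_res is not None:
                return prompt_res
        except Exception as exc:
            last_error = exc  # keep the most recent failure for the final report
        max_tries -= 1
    # Every attempt failed; prompt_res is still None here.
    raise RuntimeError(f"all retries failed: last result {prompt_res!r}, last error {last_error!r}")

# Example: invoke_with_retries(lambda: None, max_tries=2) raises after two failed attempts.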

pyproject.toml (9 changes: 6 additions & 3 deletions)

@@ -59,8 +59,6 @@ platformdirs = "4.2.0"
 pluggy = "1.4.0"
 ptyprocess = "0.7.0"
 pycparser = "2.21"
-pydantic = "2.6.1"
-pydantic-core = "2.16.2"
 pygments = "2.17.2"
 pyproject-hooks = "1.0.0"
 pytest = "8.0.2"

@@ -75,7 +73,7 @@ shellingham = "1.5.4"
 sniffio = "1.3.0"
 tenacity = "8.2.3"
 threadpoolctl = "3.3.0"
-tiktoken = "0.6.0"
+tiktoken = "^0.7.0"
 tokenizers = "0.15.2"
 tomlkit = "0.12.4"
 tqdm = "4.66.2"

@@ -87,6 +85,11 @@ xattr = "1.1.0"
 yarl = "1.9.4"
 zipp = "3.17.0"
 ratelimit = "^2.2.1"
+langchain = "^0.2.7"
+langchain-anthropic = "^0.1.19"
+langchain-openai = "^0.1.14"
+langchain-community = "^0.2.7"
+scikit-learn = "^1.5.1"
 
 [tool.poetry.dev-dependencies]
 

tests/test_fake_anthropic.py (64 changes: 32 additions & 32 deletions)

@@ -1,35 +1,35 @@
 import pytest
-from langdspy import PromptRunner, PromptSignature, InputField, OutputField, DefaultPromptStrategy
-from langchain_contrib.llms.testing import FakeLLM
-
-class TestPromptSignature(PromptSignature):
-    input = InputField(name="input", desc="Input field")
-    output = OutputField(name="output", desc="Output field")
-
-def test_fake_anthropic_llm():
-    prompt_runner = PromptRunner(template_class=TestPromptSignature, prompt_strategy=DefaultPromptStrategy)
-
-    # Test with default response
-    llm = FakeLLM()
-    result = prompt_runner.invoke({"input": "test input"}, config={"llm": llm, "llm_type": "fake_anthropic"})
+from langdspy import PromptRunner, PromptSignature, InputField, OutputField, DefaultPromptStrategy, Model
+from langchain_community.llms import FakeListLLM
+# from langchain_contrib.llms.testing import FakeLLM
+
+def create_test_model():
+    class TestPromptSignature(PromptSignature):
+        input = InputField(name="input", desc="Input field")
+        output = OutputField(name="output", desc="Output field")
+
+
+    class TestModel(Model):
+        p1 = PromptRunner(template_class=TestPromptSignature, prompt_strategy=DefaultPromptStrategy)
+
+        def invoke(self, input: str, config: dict) -> str:
+            result = self.p1.invoke({"input": input}, config=config)
+            return result
+
+    return TestModel()
+
+@pytest.fixture
+def test_model():
+    return create_test_model()
+
+
+def test_fake_anthropic_llm(test_model):
+    llm = FakeListLLM(verbose=True, responses=["<output>foo</output>", "<output>bar</output>", "<output>baz</output>"])
+    result = test_model.invoke({"input": "test input"}, config={"llm": llm, "llm_type": "fake_anthropic"})
     assert result.output == "foo"
 
-    # Test with custom mapped response
-    llm = FakeLLM(mapped_responses={"<input>test input</input>": "<output>Custom response</output>"})
-    result = prompt_runner.invoke({"input": "test input"}, config={"llm": llm, "llm_type": "fake_anthropic"})
-    assert result.output == "Custom response"
-
-    # Test with sequenced responses
-    llm = FakeLLM(sequenced_responses=["<output>One</output>", "<output>Two</output>", "<output>Three</output>"])
-
-    result1 = prompt_runner.invoke({"input": "test input"}, config={"llm": llm, "llm_type": "fake_anthropic"})
-    assert result1.output == "One"
-
-    result2 = prompt_runner.invoke({"input": "test input"}, config={"llm": llm, "llm_type": "fake_anthropic"})
-    assert result2.output == "Two"
-
-    result3 = prompt_runner.invoke({"input": "test input"}, config={"llm": llm, "llm_type": "fake_anthropic"})
-    assert result3.output == "Three"
-
-    result4 = prompt_runner.invoke({"input": "test input"}, config={"llm": llm, "llm_type": "fake_anthropic"})
-    assert result4.output == "foo"  # Default response after exhausting sequenced responses
+    result = test_model.invoke({"input": "test input"}, config={"llm": llm, "llm_type": "fake_anthropic"})
+    assert result.output == "bar"
+
+    result = test_model.invoke({"input": "test input"}, config={"llm": llm, "llm_type": "fake_anthropic"})
+    assert result.output == "baz"
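
The responses handed to FakeListLLM above are wrapped in <output> tags because the test runs with llm_type set to "fake_anthropic", so the completion is parsed as Anthropic-style tagged output and the tag contents become the output field (hence "<output>foo</output>" yielding result.output == "foo"). A rough sketch of that kind of tag extraction, offered as an assumption about the approach rather than langdspy's actual parser:

import re

def extract_tagged_field(completion: str, field_name: str):
    # Return the text between <field_name> and </field_name>, or None if the tag is absent.
    match = re.search(rf"<{field_name}>(.*?)</{field_name}>", completion, re.DOTALL)
    return match.group(1).strip() if match else None

# Mirrors the expectation in the test above.
assert extract_tagged_field("<output>foo</output>", "output") == "foo"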

tests/test_model_train.py (2 changes: 1 addition & 1 deletion)

@@ -70,7 +70,7 @@ def slug_similarity(X, true_slugs, predicted_slugs):
 
 @pytest.fixture
 def model():
-    return ProductSlugGenerator(n_jobs=1, print_prompt=False)
+    return ProductSlugGenerator(n_jobs=1)
 
 @pytest.fixture
 def llm():

tests/test_prompt_runner.py (38 changes: 4 additions & 34 deletions)

@@ -8,7 +8,7 @@
 import pytest
 from unittest.mock import MagicMock
 from langchain.chains import LLMChain
-from langdspy import PromptRunner, DefaultPromptStrategy, InputField, OutputField, Model, PromptSignature, Prediction
+from langdspy import PromptRunner, DefaultPromptStrategy, InputField, OutputField, Model, PromptSignature, Prediction, PromptStrategy
 
 class TestPromptSignature(PromptSignature):
     input = InputField(name="input", desc="Input field")

@@ -25,21 +25,6 @@ def invoke(self, input_dict, config):
 
 from unittest.mock import patch
 
-# def test_print_prompt_in_config():
-#     model = TestModel(n_jobs=1, print_prompt=True)
-
-#     input_dict = {"input": "Test input"}
-#     mock_invoke = MagicMock(return_value=Prediction(**{**input_dict, "output": "Test output"}))
-
-#     # with patch.object(TestModel, 'invoke', new=mock_invoke):
-#     config = {"llm": mock_invoke}
-#     result = model.invoke(input_dict, config)
-#     print(result)
-
-#     mock_invoke.assert_called_once_with(input_dict, config)
-#     assert "print_prompt" in config
-#     assert config["print_prompt"] == True
-#     assert result.output == "Test output"
 
 from langchain.chat_models.base import BaseChatModel
 

@@ -53,34 +38,19 @@ def _generate(self, *args, **kwargs):
     def _llm_type(self) -> str:
         return "test"
 
-def test_print_prompt_in_inputs():
-    model = TestModel(n_jobs=1, print_prompt="TEST")
-    input_dict = {"input": "Test input"}
-    mock_invoke = MagicMock(return_value="FORMATTED PROMPT")
-
-    with patch.object(DefaultPromptStrategy, 'format_prompt', new=mock_invoke):
-        config = {"llm": TestLLM(), "llm_type": "test"}
-        result = model.invoke(input_dict, config=config)
-
-    print(result)
-    print(f"Called with {mock_invoke.call_count} {mock_invoke.call_args_list} {mock_invoke.call_args}")
-    call_args = {**input_dict, 'print_prompt': "TEST", 'trained_state': model.trained_state, 'use_training': True, 'llm_type': "test"}
-    print(f"Expecting call {call_args}")
-    mock_invoke.assert_called_with(**call_args)
-
 def test_trained_state_in_inputs():
     model = TestModel(n_jobs=1)
     input_dict = {"input": "Test input"}
     mock_invoke = MagicMock(return_value="FORMATTED PROMPT")
 
-    with patch.object(DefaultPromptStrategy, 'format_prompt', new=mock_invoke):
+    with patch.object(PromptStrategy, 'format_prompt', new=mock_invoke):
         config = {"llm": TestLLM(), "llm_type": "test"}
         model.trained_state.examples = [("EXAMPLE_X", "EXAMPLE_Y")]
        result = model.invoke(input_dict, config=config)
 
     print(result)
     print(f"Called with {mock_invoke.call_count} {mock_invoke.call_args_list} {mock_invoke.call_args}")
-    call_args = {**input_dict, 'print_prompt': "TEST", 'trained_state': model.trained_state, 'use_training': True, 'llm_type': "test"}
+    call_args = {**input_dict, 'trained_state': model.trained_state, 'use_training': True, 'llm_type': "test"}
     print(f"Expecting call {call_args}")
     mock_invoke.assert_called_with(**call_args)
 
@@ -96,6 +66,6 @@ def test_use_training():
 
     print(result)
     print(f"Called with {mock_invoke.call_count} {mock_invoke.call_args_list} {mock_invoke.call_args}")
-    call_args = {**input_dict, 'print_prompt': "TEST", 'trained_state': model.trained_state, 'use_training': False, 'llm_type': "test"}
+    call_args = {**input_dict, 'trained_state': model.trained_state, 'use_training': False, 'llm_type': "test"}
     print(f"Expecting call {call_args}")
     mock_invoke.assert_called_with(**call_args)
