
Commit bd8aa8a

Merge branch 'generative-computing:main' into security_poc
2 parents: 6e027a9 + e30afe6

25 files changed: +426 -51 lines

.pre-commit-config.yaml
Lines changed: 1 addition & 1 deletion

@@ -28,7 +28,7 @@ repos:
       - id: uv-lock

   - repo: https://github.com/codespell-project/codespell
-    rev: v2.2.6
+    rev: v2.4.1
     hooks:
       - id: codespell
         additional_dependencies:

CHANGELOG.md
Lines changed: 18 additions & 0 deletions

@@ -1,3 +1,21 @@
+## [v0.1.3](https://github.com/generative-computing/mellea/releases/tag/v0.1.3) - 2025-10-22
+
+### Feature
+
+* Decompose cli tool enhancements & new prompt_modules ([#170](https://github.com/generative-computing/mellea/issues/170)) ([`b8fc8e1`](https://github.com/generative-computing/mellea/commit/b8fc8e1bd9478d87c6a9c5cf5c0cca751f13bd11))
+* Add async functions ([#169](https://github.com/generative-computing/mellea/issues/169)) ([`689e1a9`](https://github.com/generative-computing/mellea/commit/689e1a942efab6cb1d7840f6bdbd96d579bdd684))
+* Add Granite Guardian 3.3 8B with updated examples function call validation and repair with reason. ([#167](https://github.com/generative-computing/mellea/issues/167)) ([`517e9c5`](https://github.com/generative-computing/mellea/commit/517e9c5fb93cba0b5f5a69278806fc0eda897785))
+* Majority voting sampling strategy ([#142](https://github.com/generative-computing/mellea/issues/142)) ([`36eaca4`](https://github.com/generative-computing/mellea/commit/36eaca482957353ba505d494f7be32c5226de651))
+
+### Fix
+
+* Fix vllm install script ([#185](https://github.com/generative-computing/mellea/issues/185)) ([`abcf622`](https://github.com/generative-computing/mellea/commit/abcf622347bfbb3c5d97c74a2624bf8f051f4136))
+* Watsonx and litellm parameter filtering ([#187](https://github.com/generative-computing/mellea/issues/187)) ([`793844c`](https://github.com/generative-computing/mellea/commit/793844c44ed091f4c6abae1cc711e3746a960ef4))
+* Pin trl to version 0.19.1 to avoid deprecation ([#202](https://github.com/generative-computing/mellea/issues/202)) ([`9948907`](https://github.com/generative-computing/mellea/commit/9948907303774494fee6286d482dd10525121ba2))
+* Rename format argument in internal methods for better mypiability ([#172](https://github.com/generative-computing/mellea/issues/172)) ([`7a6f780`](https://github.com/generative-computing/mellea/commit/7a6f780bdd71db0a7e0a1e78dfc78dcc4e4e5d93))
+* Async overhaul; create global event loop; add client cache ([#186](https://github.com/generative-computing/mellea/issues/186)) ([`1e236dd`](https://github.com/generative-computing/mellea/commit/1e236dd15bd426ed31f148ccdca4c63e43468fd0))
+* Update readme and other places with granite model and tweaks ([#184](https://github.com/generative-computing/mellea/issues/184)) ([`519a35a`](https://github.com/generative-computing/mellea/commit/519a35a7bb8a2547e90cf04fd5e70a3f74d9fc22))
+
 ## [v0.1.2](https://github.com/generative-computing/mellea/releases/tag/v0.1.2) - 2025-10-03

 ### Feature

docs/examples/agents/react.py
Lines changed: 4 additions & 0 deletions

@@ -13,8 +13,12 @@
 import mellea.stdlib
 import mellea.stdlib.base
 import mellea.stdlib.chat
+from mellea.backends import model_ids
+from mellea.helpers.fancy_logger import FancyLogger
 from mellea.stdlib.base import ChatContext

+FancyLogger.get_logger().setLevel("ERROR")
+
 react_system_template: Template = Template(
     """Answer the user's question as best you can.


docs/examples/conftest.py
Lines changed: 105 additions & 0 deletions (new file)

@@ -0,0 +1,105 @@
"""Allows you to use `pytest docs` to run the examples."""

import pathlib
import subprocess
import sys

import pytest

examples_to_skip = {
    "101_example.py",
    "__init__.py",
    "simple_rag_with_filter.py",
    "mcp_example.py",
    "client.py",
}


def pytest_terminal_summary(terminalreporter, exitstatus, config):
    # Append the skipped examples if needed.
    if len(examples_to_skip) == 0:
        return

    terminalreporter.ensure_newline()
    terminalreporter.section("Skipped Examples", sep="=", blue=True, bold=True)
    terminalreporter.line(
        f"Examples with the following names were skipped because they cannot be easily run in the pytest framework; please run them manually:\n{'\n'.join(examples_to_skip)}"
    )


# This doesn't replace the existing pytest file collection behavior.
def pytest_collect_file(parent: pytest.Dir, file_path: pathlib.PosixPath):
    # Do a quick check that it's a .py file in the expected `docs/examples` folder. We can make
    # this more exact if needed.
    if (
        file_path.suffix == ".py"
        and "docs" in file_path.parts
        and "examples" in file_path.parts
    ):
        # Skip this test. It requires additional setup.
        if file_path.name in examples_to_skip:
            return

        return ExampleFile.from_parent(parent, path=file_path)

    # TODO: Support running jupyter notebooks:
    # - use nbmake or directly use nbclient as documented below
    # - install the nbclient package
    # - run either using python api or jupyter execute
    # - must replace background processes
    # if file_path.suffix == ".ipynb":
    #     return ExampleFile.from_parent(parent, path=file_path)


class ExampleFile(pytest.File):
    def collect(self):
        return [ExampleItem.from_parent(self, name=self.name)]


class ExampleItem(pytest.Item):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def runtest(self):
        process = subprocess.Popen(
            [sys.executable, self.path],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            bufsize=1,  # Enable line-buffering
        )

        # Capture stdout output and output it so it behaves like a regular test with -s.
        stdout_lines = []
        if process.stdout is not None:
            for line in process.stdout:
                sys.stdout.write(line)
                sys.stdout.flush()  # Ensure the output is printed immediately
                stdout_lines.append(line)
            process.stdout.close()

        retcode = process.wait()

        # Capture stderr output.
        stderr = ""
        if process.stderr is not None:
            stderr = process.stderr.read()

        if retcode != 0:
            raise ExampleTestException(
                (f"Example failed with exit code {retcode}.\nStderr: {stderr}\n")
            )

    def repr_failure(self, excinfo, style=None):
        """Called when self.runtest() raises an exception."""
        if isinstance(excinfo.value, ExampleTestException):
            return str(excinfo.value)

        return super().repr_failure(excinfo)

    def reportinfo(self):
        return self.path, 0, f"usecase: {self.name}"


class ExampleTestException(Exception):
    """Custom exception for error reporting."""

docs/examples/image_text_models/vision_litellm_backend.py
Lines changed: 3 additions & 1 deletion

@@ -9,13 +9,15 @@
 from mellea.backends.litellm import LiteLLMBackend
 from mellea.backends.openai import OpenAIBackend
 from mellea.stdlib.base import ImageBlock
+import pathlib

 # use LiteLLM to talk to Ollama or anthropic or.....
 m = MelleaSession(LiteLLMBackend("ollama/granite3.2-vision"))
 # m = MelleaSession(LiteLLMBackend("ollama/llava"))
 # m = MelleaSession(LiteLLMBackend("anthropic/claude-3-haiku-20240307"))

-test_pil = Image.open("pointing_up.jpg")
+image_path = pathlib.Path(__file__).parent.joinpath("pointing_up.jpg")
+test_pil = Image.open(image_path)

 # check if model is able to do text chat
 ch = m.chat("What's 1+1?")

docs/examples/image_text_models/vision_ollama_chat.py
Lines changed: 4 additions & 2 deletions

@@ -1,5 +1,6 @@
 """Example of using Ollama with vision models with linear context."""

+import pathlib
 from PIL import Image

 from mellea import start_session
@@ -9,10 +10,11 @@
 # m = start_session(model_id="llava", ctx=ChatContext())

 # load image
-test_img = Image.open("pointing_up.jpg")
+image_path = pathlib.Path(__file__).parent.joinpath("pointing_up.jpg")
+test_pil = Image.open(image_path)

 # ask a question about the image
-res = m.instruct("Is the subject in the image smiling?", images=[test_img])
+res = m.instruct("Is the subject in the image smiling?", images=[test_pil])
 print(f"Result:{res!s}")

 # This instruction should refer to the first image.
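The file's closing comment notes that a follow-up instruction should refer back to the first image. The continuation is not shown in this hunk, but with a ChatContext a second call would typically lean on that context rather than re-attaching the image. A hedged sketch, not part of this commit:

```python
# Sketch only: follow-up turn that relies on the ChatContext built up by the call above.
res2 = m.instruct("What gesture is the subject making in that same image?")
print(f"Result:{res2!s}")
```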

docs/examples/image_text_models/vision_openai_examples.py
Lines changed: 14 additions & 5 deletions

@@ -1,27 +1,36 @@
 """Examples using vision models with OpenAI backend."""

-import os
+import pathlib

 from PIL import Image

 from mellea import MelleaSession
 from mellea.backends.openai import OpenAIBackend
-from mellea.stdlib.base import ImageBlock
+from mellea.stdlib.base import ChatContext, ImageBlock

 # # using anthropic AI model ...
 # anth_key = os.environ.get("ANTHROPIC_API_KEY")
 # m = MelleaSession(OpenAIBackend(model_id="claude-3-haiku-20240307",
 #                  api_key=anth_key, # Your Anthropic API key
 #                  base_url="https://api.anthropic.com/v1/" # Anthropic's API endpoint
-#                  ))
+#                  ),
+#                  ctx=ChatContext())

 # using LM Studio model locally
+# m = MelleaSession(
+#     OpenAIBackend(model_id="qwen/qwen2.5-vl-7b", base_url="http://127.0.0.1:1234/v1"), ctx=ChatContext()
+# )
+
 m = MelleaSession(
-    OpenAIBackend(model_id="qwen/qwen2.5-vl-7b", base_url="http://127.0.0.1:1234/v1")
+    OpenAIBackend(
+        model_id="qwen2.5vl:7b", base_url="http://localhost:11434/v1", api_key="ollama"
+    ),
+    ctx=ChatContext(),
 )

 # load PIL image and convert to mellea ImageBlock
-test_pil = Image.open("pointing_up.jpg")
+image_path = pathlib.Path(__file__).parent.joinpath("pointing_up.jpg")
+test_pil = Image.open(image_path)
 test_img = ImageBlock.from_pil_image(test_pil)

 # check if model is able to do text chat
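The hunk stops at the text-chat check. For orientation only, a hedged sketch of how the converted ImageBlock is typically passed onward, assuming the same `images=` keyword used in vision_ollama_chat.py above (not part of this diff):

```python
# Sketch only, mirroring the pattern from vision_ollama_chat.py above.
ch = m.chat("What's 1+1?")  # plain text chat still works
res = m.instruct("Is the subject in the image smiling?", images=[test_img])
print(f"Result:{res!s}")
```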

docs/examples/mcp/README.md
Lines changed: 46 additions & 0 deletions (new file)

@@ -0,0 +1,46 @@
# Write a poem MCP

This is a simple example to show how to write a MCP tool
with Mellea and instruct-validate-repair. Being able to
speak the tool language allows you to integrate with
Claude Desktop, Langflow, ...

See code in [mcp_example.py](mcp_example.py)

## Run the example

You need to install the mcp package:

```bash
uv pip install "mcp[cli]"
```

and run the example in MCP debug UI:

```bash
uv run mcp dev docs/examples/tutorial/mcp_example.py
```

## Use in Langflow

Follow this path (JSON) to use it in Langflow: [https://docs.langflow.org/mcp-client#mcp-stdio-mode](https://docs.langflow.org/mcp-client#mcp-stdio-mode)

The JSON to register your MCP tool is the following. Be sure to insert the absolute path to the directory containing the mcp_example.py file:

```json
{
  "mcpServers": {
    "mellea_mcp_server": {
      "command": "uv",
      "args": [
        "--directory",
        "<ABSOLUTE PATH>/mellea/docs/examples/mcp",
        "run",
        "mcp",
        "run",
        "mcp_example.py"
      ]
    }
  }
}
```

docs/examples/mcp/mcp_example.py
Lines changed: 54 additions & 0 deletions (new file)

@@ -0,0 +1,54 @@
"""Example of an MCP server.

You need to install the mcp package:
uv pip install "mcp[cli]"

and run the example in MCP debug UI:
uv run mcp dev docs/examples/tutorial/mcp_example.py
"""

from mcp.server.fastmcp import FastMCP

from mellea import MelleaSession
from mellea.backends import ModelOption, model_ids
from mellea.backends.ollama import OllamaModelBackend
from mellea.stdlib.base import ModelOutputThunk
from mellea.stdlib.requirement import Requirement, simple_validate
from mellea.stdlib.sampling import RejectionSamplingStrategy

# #################
# run MCP debug UI with: uv run mcp dev docs/examples/tutorial/mcp_example.py
# ##################


# Create an MCP server
mcp = FastMCP("Demo")


@mcp.tool()
def write_a_poem(word_limit: int) -> str:
    """Write a poem with a word limit."""
    m = MelleaSession(
        OllamaModelBackend(
            model_ids.HF_SMOLLM2_2B,
            model_options={ModelOption.MAX_NEW_TOKENS: word_limit + 10},
        )
    )
    wl_req = Requirement(
        f"Use only {word_limit} words.",
        validation_fn=simple_validate(lambda x: len(x.split(" ")) < word_limit),
    )

    res = m.instruct(
        "Write a poem",
        requirements=[wl_req],
        strategy=RejectionSamplingStrategy(loop_budget=2),
    )
    assert isinstance(res, ModelOutputThunk)
    return str(res.value)


@mcp.resource("greeting://{name}")
def get_greeting(name: str) -> str:
    """Get a personalized greeting."""
    return f"Hello, {name}!"

docs/examples/mcp/mcp_server.json
Lines changed: 15 additions & 0 deletions (new file)

@@ -0,0 +1,15 @@
{
  "mcpServers": {
    "mellea_mcp_server": {
      "command": "uv",
      "args": [
        "--directory",
        "<ABSOLUTE PATH>/mellea/docs/examples/mcp",
        "run",
        "mcp",
        "run",
        "mcp_example.py"
      ]
    }
  }
}
