Commit 3dcfc0f (v3.12.1)
- Possibility to force-disable streaming when callbacks are provided (OpenAI, Anthropic APIs)
- Fix tests when a .env file is present
Nayjest committed Dec 10, 2024
1 parent c4c87fd commit 3dcfc0f
Showing 7 changed files with 45 additions and 7 deletions.
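In practice, the first change lets a caller keep a callback for post-processing while still receiving the whole completion at once. A minimal usage sketch, mirroring the API exercised in tests/basic/test_llm.py below and assuming microcore is configured with a working (or mocked) LLM backend; the prompt and model name are illustrative:

import microcore as mc

received = []

def on_text(text: str):
    # With stream=False the callback fires once with the full completion
    # instead of once per streamed chunk.
    received.append(text)

# An explicit stream=False now wins even though a callback is provided.
result = mc.llm("Say hello", model="gpt-4", stream=False, callback=on_text)
print(result, received)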
2 changes: 2 additions & 0 deletions Makefile
@@ -11,6 +11,8 @@ init:
 	docker-compose up -d --build && $(DEXEC) bash
 sh:
 	$(DEXEC) bash
+start:
+	docker-compose start
 stop:
 	docker-compose stop
 cs:
2 changes: 1 addition & 1 deletion microcore/__init__.py
@@ -150,4 +150,4 @@ def has_content(self, collection: str) -> bool:
     # "wrappers",
 ]

-__version__ = "3.12.0"
+__version__ = "3.12.1"
8 changes: 6 additions & 2 deletions microcore/llm/_openai_llm_v0.py
@@ -73,7 +73,8 @@ def _prepare_llm_arguments(config: Config, kwargs: dict):
     cb = args.pop("callback")
     if cb:
         callbacks.append(cb)
-    args["stream"] = bool(callbacks)
+    if "stream" not in args:
+        args["stream"] = bool(callbacks)
     return args, {"callbacks": callbacks}


@@ -92,7 +93,10 @@ async def allm(prompt, **kwargs):
         )

         for cb in options["callbacks"]:
-            cb(response.choices[0].message.content)
+            if asyncio.iscoroutinefunction(cb):
+                await cb(response.choices[0].message.content)
+            else:
+                cb(response.choices[0].message.content)
         return LLMResponse(response.choices[0].message.content, response)

     response = await openai.Completion.acreate(
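The allm changes above make the non-streaming path work with coroutine callbacks as well as plain functions. A self-contained sketch of the same dispatch pattern, independent of microcore (notify_callbacks is an illustrative helper, not part of the library):

import asyncio

def sync_cb(text):
    print("sync:", text)

async def async_cb(text):
    print("async:", text)

async def notify_callbacks(callbacks, text):
    # Same rule as the diff above: await coroutine functions, call plain ones directly.
    for cb in callbacks:
        if asyncio.iscoroutinefunction(cb):
            await cb(text)
        else:
            cb(text)

asyncio.run(notify_callbacks([sync_cb, async_cb], "hello"))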
8 changes: 6 additions & 2 deletions microcore/llm/_openai_llm_v1.py
@@ -63,7 +63,8 @@ def _prepare_llm_arguments(config: Config, kwargs: dict):
     cb = args.pop("callback")
     if cb:
         callbacks.append(cb)
-    args["stream"] = bool(callbacks)
+    if "stream" not in args:
+        args["stream"] = bool(callbacks)
     return args, {"callbacks": callbacks}


@@ -107,7 +108,10 @@ async def allm(prompt, **kwargs):
         )

         for cb in options["callbacks"]:
-            cb(response.choices[0].message.content)
+            if asyncio.iscoroutinefunction(cb):
+                await cb(response.choices[0].message.content)
+            else:
+                cb(response.choices[0].message.content)
         return LLMResponse(response.choices[0].message.content, response.__dict__)

     response = await _async_connection.completions.create(
8 changes: 6 additions & 2 deletions microcore/llm/anthropic.py
@@ -44,7 +44,8 @@ def _prepare_llm_arguments(config: Config, kwargs: dict):
     cb = args.pop("callback")
     if cb:
         callbacks.append(cb)
-    args["stream"] = bool(callbacks)
+    if "stream" not in args:
+        args["stream"] = bool(callbacks)
     return args, {"callbacks": callbacks}


@@ -105,7 +106,10 @@ async def allm(prompt, **kwargs):
         return await _a_process_streamed_response(response, options["callbacks"])

     for cb in options["callbacks"]:
-        cb(response.content[0].text)
+        if asyncio.iscoroutinefunction(cb):
+            await cb(response.content[0].text)
+        else:
+            cb(response.content[0].text)
     return LLMResponse(response.content[0].text, response.__dict__)

 def llm(prompt, **kwargs):
3 changes: 3 additions & 0 deletions tests/basic/test_config.py
@@ -6,6 +6,9 @@


 def test_config(monkeypatch):
+    # avoid using .env file
+    monkeypatch.setenv("USE_DOT_ENV", "")
+
     monkeypatch.setenv("LLM_API_KEY", "123")
     monkeypatch.setenv("PROMPT_TEMPLATES_PATH", "mypath")
     assert Config().PROMPT_TEMPLATES_PATH == "mypath"
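If other tests can also be affected by a developer's local .env file, the same guard could be applied once for the whole suite. A possible conftest.py fixture, assuming Config treats an empty USE_DOT_ENV as "do not load .env", as the test above implies:

import pytest

@pytest.fixture(autouse=True)
def no_dot_env(monkeypatch):
    # Keep settings from a local .env file out of every test.
    monkeypatch.setenv("USE_DOT_ENV", "")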
21 changes: 21 additions & 0 deletions tests/basic/test_llm.py
@@ -16,3 +16,24 @@ async def test_llm_mocked_parrot(setup):
     assert mc.llm("ok", model="chat-llama") == "ok"
     assert await mc.allm("ok", model="gpt-4") == "ok"
     assert await mc.allm("ok", model="gpt-3.5-instruct") == "completion:ok"
+
+@pytest.mark.asyncio
+async def test_llm_no_streaming(setup):
+    t = ""
+    def fn(text):
+        nonlocal t
+        t += text
+
+    mc.llm("ok", model="gpt-4", stream=False, callback=fn)
+    assert t == "ok"
+
+    def afn(text):
+        nonlocal t
+        t += text
+    t = ""
+    mc.llm("ok2", model="gpt-4", stream=False, callback=afn)
+    assert t == "ok2"
+
+    t = ""
+    mc.llm("ok", model="gpt-4", stream=False, callbacks=[afn,fn])
+    assert t == "okok"

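These tests exercise synchronous callbacks through the blocking llm() call; the allm() changes above additionally await coroutine callbacks. A sketch of that async usage, assuming the same configured (or mocked) backend as the tests; prompt and model name are illustrative:

import asyncio
import microcore as mc

async def main():
    received = []

    async def on_text(text: str):
        # allm() awaits coroutine callbacks when streaming is disabled.
        received.append(text)

    await mc.allm("ok", model="gpt-4", stream=False, callback=on_text)
    print(received)

asyncio.run(main())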