ignore types due to anthropic [AUTH-1050]
Elyorcv committed Nov 19, 2024
1 parent f188e93 commit 230523e
Showing 4 changed files with 21 additions and 18 deletions.
@@ -30,7 +30,7 @@
"login": "Login",
"logout": "Logout",
"make_sure_your_prompt": "Write a prompt in a natural language for OpentronsAI to generate a protocol using the Opentrons Python Protocol API v2. The better the prompt, the better the quality of the protocol produced by OpentronsAI.",
"modify_intro": "Modify the following Python code using the Opentrons Python Protocol API v2. Ensure that the new labware and pipettes are compatible with the robot type specified in the protocol.\n\n",
"modify_intro": "Modify the following Python code using the Opentrons Python Protocol API v2. Ensure that the new labware and pipettes are compatible with the Flex robot.\n\n",
"modify_python_code": "Original Python Code:\n",
"modify_type_of_update": "Type of update:\n- ",
"modify_details_of_change": "Detail of changes:\n- ",
6 changes: 3 additions & 3 deletions opentrons-ai-server/Pipfile.lock

Some generated files are not rendered by default.

14 changes: 7 additions & 7 deletions opentrons-ai-server/api/domain/anthropic_predict.py
@@ -27,7 +27,7 @@ def __init__(self, settings: Settings) -> None:
{
"role": "user",
"content": [
{"type": "text", "text": DOCUMENTS.format(doc_content=self.get_docs()), "cache_control": {"type": "ephemeral"}}
{"type": "text", "text": DOCUMENTS.format(doc_content=self.get_docs()), "cache_control": {"type": "ephemeral"}} # type: ignore
],
}
]
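The `# type: ignore` here works around the SDK's message typing: `cache_control` belongs to the prompt-caching beta, and when the installed `anthropic` stubs don't declare it on text content blocks, mypy rejects the dict literal even though the API accepts it. A minimal sketch of the pattern, with a hypothetical stand-in for the module's `DOCUMENTS` template:

from typing import Any

# Hypothetical stand-in for the module's DOCUMENTS prompt template.
DOCUMENTS = "Reference documents:\n{doc_content}"

def build_cached_messages(doc_content: str) -> list[dict[str, Any]]:
    # "cache_control" marks the block for Anthropic's prompt caching; if the
    # SDK's TypedDicts don't know the field, a targeted "# type: ignore" on
    # the offending line keeps mypy quiet without loosening the whole module.
    return [
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": DOCUMENTS.format(doc_content=doc_content),
                    "cache_control": {"type": "ephemeral"},
                }
            ],
        }
    ]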
@@ -84,7 +84,7 @@ def generate_message(self, max_tokens: int = 4096) -> Message:
system=self.system_prompt,
max_tokens=max_tokens,
messages=self._messages,
- tools=self.tools,
+ tools=self.tools, # type: ignore
extra_headers={"anthropic-beta": "prompt-caching-2024-07-31"},
)
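For context, the call being patched has roughly this shape; the model name and tool schema below are illustrative assumptions, while the `extra_headers` opt-in to the prompt-caching beta is taken from the diff. Passing tools as plain dicts rather than the SDK's `ToolParam` TypedDicts is what makes the `# type: ignore` necessary:

import anthropic

client = anthropic.Anthropic()  # reads ANTHROPIC_API_KEY from the environment

response = client.messages.create(
    model="claude-3-5-sonnet-20241022",  # assumed; the service configures its own model
    system="You help write Opentrons protocols.",
    max_tokens=4096,
    messages=[{"role": "user", "content": "Write a simple transfer protocol."}],
    tools=[
        {
            # A hypothetical tool definition in the API's JSON-schema format.
            "name": "simulate_protocol",
            "description": "Simulate a protocol and report errors or run status.",
            "input_schema": {
                "type": "object",
                "properties": {"protocol": {"type": "string"}},
                "required": ["protocol"],
            },
        }
    ],
    extra_headers={"anthropic-beta": "prompt-caching-2024-07-31"},
)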

@@ -107,7 +107,7 @@ def predict(self, prompt: str) -> str | None:
if response.content[-1].type == "tool_use":
tool_use = response.content[-1]
self._messages.append({"role": "assistant", "content": response.content})
- result = self.handle_tool_use(tool_use.name, tool_use.input)
+ result = self.handle_tool_use(tool_use.name, tool_use.input) # type: ignore
self._messages.append(
{
"role": "user",
@@ -121,7 +121,7 @@
}
)
follow_up = self.generate_message()
- response_text = follow_up.content[0].text
+ response_text = follow_up.content[0].text # type: ignore
self._messages.append({"role": "assistant", "content": response_text})
return response_text
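This hunk is one round of Anthropic's tool-use loop: the assistant's `tool_use` block is executed locally and its output is sent back as a `tool_result` content block, then a follow-up message is requested. The final ignore exists because `content[0]` is a union of block types and only text blocks have `.text`. A condensed sketch of the flow, assuming a caller-supplied tool handler and model name:

from typing import Any, Callable

import anthropic

def run_tool_turn(
    client: anthropic.Anthropic,
    messages: list[dict[str, Any]],
    response: anthropic.types.Message,
    handle_tool_use: Callable[[str, Any], str],
) -> str | None:
    block = response.content[-1]
    if block.type != "tool_use":
        return None
    # Record the assistant turn that requested the tool...
    messages.append({"role": "assistant", "content": response.content})
    # ...run it locally, then hand the output back as a tool_result block.
    result = handle_tool_use(block.name, block.input)
    messages.append(
        {
            "role": "user",
            "content": [
                {"type": "tool_result", "tool_use_id": block.id, "content": result}
            ],
        }
    )
    follow_up = client.messages.create(
        model="claude-3-5-sonnet-20241022",  # assumed model name
        max_tokens=4096,
        messages=messages,  # type: ignore  # plain dicts vs. MessageParam, as in the diff
    )
    first = follow_up.content[0]
    # Checking the block type first avoids the union-attribute ignore above.
    return first.text if first.type == "text" else None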

@@ -154,7 +154,7 @@ def reset(self) -> None:
{
"role": "user",
"content": [
{"type": "text", "text": DOCUMENTS.format(doc_content=self.get_docs()), "cache_control": {"type": "ephemeral"}}
{"type": "text", "text": DOCUMENTS.format(doc_content=self.get_docs()), "cache_control": {"type": "ephemeral"}} # type: ignore
],
}
]
@@ -175,9 +175,9 @@ def simulate_protocol(self, protocol: str) -> str:
response_data = response.json()
if "error_message" in response_data:
logger.error("Simulation error", extra={"error": response_data["error_message"]})
return response_data["error_message"]
return str(response_data["error_message"])
elif "protocol_name" in response_data:
return response_data["run_status"]
return str(response_data["run_status"])
else:
logger.error("Unexpected response", extra={"response": response_data})
return "Unexpected response"
17 changes: 10 additions & 7 deletions opentrons-ai-server/api/handler/fast.py
@@ -1,7 +1,7 @@
import asyncio
import os
import time
- from typing import Annotated, Any, Awaitable, Callable, List, Literal, Union
+ from typing import Annotated, Any, Awaitable, Callable, List, Literal, Optional, Union

import structlog
from asgi_correlation_id import CorrelationIdMiddleware
@@ -198,10 +198,11 @@ async def create_chat_completion(
return ChatResponse(reply=fake.chat_response.reply, fake=fake.chat_response.fake)
return ChatResponse(reply="Default fake response. ", fake=body.fake)

+ response: Optional[str] = None
if "openai" in settings.model.lower():
- response: Union[str, None] = openai.predict(prompt=body.message, chat_completion_message_params=body.history)
+ response = openai.predict(prompt=body.message, chat_completion_message_params=body.history)
else:
- response: Union[str, None] = claude.predict(prompt=body.message)
+ response = claude.predict(prompt=body.message)

if response is None or response == "":
return ChatResponse(reply="No response was generated", fake=bool(body.fake))
@@ -241,10 +242,11 @@ async def update_protocol(
if body.fake:
return ChatResponse(reply="Fake response", fake=bool(body.fake))

+ response: Optional[str] = None
if "openai" in settings.model.lower():
- response: Union[str, None] = openai.predict(prompt=body.prompt, chat_completion_message_params=None)
+ response = openai.predict(prompt=body.prompt, chat_completion_message_params=None)
else:
- response: Union[str, None] = claude.predict(prompt=body.prompt)
+ response = claude.predict(prompt=body.prompt)

if response is None or response == "":
return ChatResponse(reply="No response was generated", fake=bool(body.fake))
@@ -285,10 +287,11 @@ async def create_protocol(
if body.fake:
return ChatResponse(reply="Fake response", fake=body.fake)

+ response: Optional[str] = None
if "openai" in settings.model.lower():
- response: Union[str, None] = openai.predict(prompt=str(body.model_dump()), chat_completion_message_params=None)
+ response = openai.predict(prompt=str(body.model_dump()), chat_completion_message_params=None)
else:
- response: Union[str, None] = claude.predict(prompt=str(body.model_dump()))
+ response = claude.predict(prompt=str(body.model_dump()))

if response is None or response == "":
return ChatResponse(reply="No response was generated", fake=bool(body.fake))
