Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions packages/ai-providers/server-ai-langchain/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,9 @@ dependencies = [
"langchain>=1.0.0",
]

[project.optional-dependencies]
graph = ["langgraph>=1.0.0"]

[project.urls]
Homepage = "https://docs.launchdarkly.com/sdk/ai/python"
Repository = "https://github.com/launchdarkly/python-server-sdk-ai"
Expand All @@ -36,6 +39,7 @@ dev = [
"mypy==1.18.2",
"pycodestyle>=2.11.0",
"isort>=5.12.0",
"langgraph>=1.0.0",
]

[build-system]
Expand Down
Original file line number Diff line number Diff line change
@@ -1,16 +1,22 @@
"""LangChain agent runner for LaunchDarkly AI SDK."""

from typing import Any

from ldai import log
from ldai.providers import AgentResult, AgentRunner
from ldai.providers.types import LDAIMetrics

from ldai_langchain.langchain_helper import sum_token_usage_from_messages
from ldai_langchain.langchain_helper import (
extract_last_message_content,
sum_token_usage_from_messages,
)


class LangChainAgentRunner(AgentRunner):
"""
CAUTION:
This feature is experimental and should NOT be considered ready for production use.
It may change or be removed without notice and is not subject to backwards
compatibility guarantees.

AgentRunner implementation for LangChain.

Wraps a compiled LangChain agent graph (from ``langchain.agents.create_agent``)
Expand All @@ -37,11 +43,7 @@ async def run(self, input: Any) -> AgentResult:
"messages": [{"role": "user", "content": str(input)}]
})
messages = result.get("messages", [])
output = ""
if messages:
last = messages[-1]
if hasattr(last, 'content') and isinstance(last.content, str):
output = last.content
output = extract_last_message_content(messages)
return AgentResult(
output=output,
raw=result,
Expand Down
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from typing import Any, Dict, List, Optional, Union

from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from ldai import LDMessage, log
from ldai.models import AIConfigKind
from ldai.providers import ToolRegistry
Expand Down Expand Up @@ -51,18 +51,12 @@ def convert_messages_to_langchain(
return result


def create_langchain_model(ai_config: AIConfigKind, tool_registry: Optional[ToolRegistry] = None) -> BaseChatModel:
def create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel:
"""
Create a LangChain BaseChatModel from a LaunchDarkly AI configuration.

If the config includes tool definitions and a tool_registry is provided, tools found
in the registry are bound to the model. Tools not found in the registry are skipped
with a warning. Built-in provider tools (e.g. code_interpreter) are not supported
via LangChain's bind_tools abstraction and are skipped with a warning.

:param ai_config: The LaunchDarkly AI configuration
:param tool_registry: Optional registry mapping tool names to callable implementations
:return: A configured LangChain BaseChatModel, with tools bound if applicable
:return: A configured LangChain BaseChatModel
"""
from langchain.chat_models import init_chat_model

Expand All @@ -73,112 +67,51 @@ def create_langchain_model(ai_config: AIConfigKind, tool_registry: Optional[Tool
model_name = model_dict.get('name', '')
provider = provider_dict.get('name', '')
parameters = dict(model_dict.get('parameters') or {})
tool_definitions = parameters.pop('tools', []) or []
parameters.pop('tools', None)
mapped_provider = map_provider(provider)

# Bedrock requires the foundation provider (e.g. Bedrock:Anthropic) passed in
# parameters separately from model_provider, which is used for LangChain routing.
if mapped_provider == 'bedrock_converse' and 'provider' not in parameters:
parameters['provider'] = provider.removeprefix('bedrock:')

model = init_chat_model(
return init_chat_model(
model_name,
model_provider=mapped_provider,
**parameters,
)

if tool_definitions and tool_registry is not None:
bindable = _resolve_tools_for_langchain(tool_definitions, tool_registry)
if bindable:
model = model.bind_tools(bindable)

return model


def _iter_valid_tools(
    tool_definitions: List[Dict[str, Any]],
    tool_registry: ToolRegistry,
) -> List[tuple]:
    """
    Select the usable function tools from a list of LD tool definitions.

    Returns (name, definition) pairs for every function tool whose name has a
    callable implementation in ``tool_registry``. Built-in provider tools and
    tools absent from the registry are skipped with a warning; malformed or
    unnamed entries are skipped silently.
    """
    selected: List[tuple] = []
    for definition in tool_definitions:
        if not isinstance(definition, dict):
            continue

        kind = definition.get('type')
        if kind and kind != 'function':
            # bind_tools only covers function tools; provider built-ins
            # (e.g. code_interpreter) need a provider-specific runner.
            log.warning(
                f"Built-in tool '{kind}' is not reliably supported via LangChain and will be skipped. "
                "Use a provider-specific runner to use built-in provider tools."
            )
            continue

        tool_name = definition.get('name')
        if not tool_name:
            continue

        if tool_name not in tool_registry:
            log.warning(f"Tool '{tool_name}' is defined in the AI config but was not found in the tool registry; skipping.")
            continue

        selected.append((tool_name, definition))

    return selected


def _resolve_tools_for_langchain(
    tool_definitions: List[Dict[str, Any]],
    tool_registry: ToolRegistry,
) -> List[Dict[str, Any]]:
    """
    Match LD tool definitions against a registry, returning function-calling
    tool dicts for each tool that has a callable implementation. Built-in
    provider tools and tools missing from the registry are skipped with a
    warning (filtering is delegated to _iter_valid_tools).
    """
    resolved: List[Dict[str, Any]] = []
    for tool_name, definition in _iter_valid_tools(tool_definitions, tool_registry):
        resolved.append({
            'type': 'function',
            'function': {
                'name': tool_name,
                'description': definition.get('description', ''),
                'parameters': definition.get('parameters', {'type': 'object', 'properties': {}}),
            },
        })
    return resolved


def build_structured_tools(ai_config: AIConfigKind, tool_registry: ToolRegistry) -> List[Any]:
def build_tools(ai_config: AIConfigKind, tool_registry: ToolRegistry) -> List[Any]:
"""
Build a list of LangChain StructuredTool instances from LD tool definitions and a registry.
Return callables from the registry for each tool defined in the AI config.

Tools found in the registry are wrapped as StructuredTool with the name and description
from the LD config. Built-in provider tools and tools missing from the registry are
skipped with a warning.
Tools not found in the registry are skipped with a warning. The returned
callables can be passed directly to bind_tools or langchain.agents.create_agent.
Functions should have type-annotated parameters so LangChain can infer the schema.

:param ai_config: The LaunchDarkly AI configuration
:param tool_registry: Registry mapping tool names to callable implementations
:return: List of StructuredTool instances ready to pass to langchain.agents.create_agent
:return: List of callables ready to pass to bind_tools or create_agent
"""
from langchain_core.tools import StructuredTool

config_dict = ai_config.to_dict()
model_dict = config_dict.get('model') or {}
parameters = dict(model_dict.get('parameters') or {})
tool_definitions = parameters.pop('tools', []) or []

return [
StructuredTool.from_function(
func=tool_registry[name],
name=name,
description=td.get('description', ''),
)
for name, td in _iter_valid_tools(tool_definitions, tool_registry)
]
tools = []
for td in tool_definitions:
if not isinstance(td, dict):
continue
name = td.get('name')
if not name:
continue
fn = tool_registry.get(name)
if fn is None:
log.warning(f"Tool '{name}' is defined in the AI config but was not found in the tool registry; skipping.")
continue
tools.append(fn)
return tools


def get_ai_usage_from_response(response: Any) -> Optional[TokenUsage]:
Expand Down Expand Up @@ -234,6 +167,20 @@ def get_tool_calls_from_response(response: Any) -> List[str]:
return names


def extract_last_message_content(messages: List[Any]) -> str:
    """
    Extract the text content of the last message in a list.

    String content is returned as-is. Structured (list) content -- e.g. from
    multimodal or mixed tool-use responses, where ``AIMessage.content`` is a
    list of content blocks -- is flattened by concatenating the text of its
    blocks rather than being silently dropped (previously this returned '').

    :param messages: List of LangChain message objects
    :return: Text content of the last message, or empty string if the list is
        empty or the last message carries no extractable text
    """
    if not messages:
        return ''
    content = getattr(messages[-1], 'content', None)
    if isinstance(content, str):
        return content
    if isinstance(content, list):
        # LangChain content blocks are either plain strings or dicts with a
        # 'text' key; collect the textual parts and ignore non-text blocks.
        parts = []
        for block in content:
            if isinstance(block, str):
                parts.append(block)
            elif isinstance(block, dict) and isinstance(block.get('text'), str):
                parts.append(block['text'])
        return ''.join(parts)
    return ''


def sum_token_usage_from_messages(messages: List[Any]) -> Optional[TokenUsage]:
"""
Sum token usage across LangChain messages using get_ai_usage_from_response per message.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@

from ldai_langchain.langchain_agent_runner import LangChainAgentRunner
from ldai_langchain.langchain_helper import (
build_structured_tools,
build_tools,
create_langchain_model,
)
from ldai_langchain.langchain_model_runner import LangChainModelRunner
Expand All @@ -14,8 +14,38 @@
class LangChainRunnerFactory(AIProvider):
"""LangChain ``AIProvider`` implementation for the LaunchDarkly AI SDK."""

def create_agent(self, config: Any, tools: Optional[ToolRegistry] = None) -> LangChainAgentRunner:
"""
CAUTION:
This feature is experimental and should NOT be considered ready for production use.
It may change or be removed without notice and is not subject to backwards
compatibility guarantees.

Create a configured LangChainAgentRunner for the given AI agent config.

:param config: The LaunchDarkly AI agent configuration
:param tools: ToolRegistry mapping tool names to callables
:return: LangChainAgentRunner ready to run the agent
"""
from langchain.agents import create_agent as lc_create_agent
instructions = (config.instructions or '') if hasattr(config, 'instructions') else ''
llm = create_langchain_model(config)
lc_tools = build_tools(config, tools or {})

agent = lc_create_agent(
llm,
tools=lc_tools or None,
system_prompt=instructions or None,
)
return LangChainAgentRunner(agent)

def create_agent_graph(self, graph_def: Any, tools: ToolRegistry) -> Any:
"""
CAUTION:
This feature is experimental and should NOT be considered ready for production use.
It may change or be removed without notice and is not subject to backwards
compatibility guarantees.

Create a configured LangGraphAgentGraphRunner for the given graph definition.

:param graph_def: The AgentGraphDefinition to execute
Expand All @@ -36,23 +66,3 @@ def create_model(self, config: AIConfigKind) -> LangChainModelRunner:
"""
llm = create_langchain_model(config)
return LangChainModelRunner(llm)

def create_agent(self, config: Any, tools: Optional[ToolRegistry] = None) -> LangChainAgentRunner:
"""
Create a configured LangChainAgentRunner for the given AI agent config.

:param config: The LaunchDarkly AI agent configuration
:param tools: ToolRegistry mapping tool names to callables
:return: LangChainAgentRunner ready to run the agent
"""
from langchain.agents import create_agent as lc_create_agent
instructions = (config.instructions or '') if hasattr(config, 'instructions') else ''
llm = create_langchain_model(config)
lc_tools = build_structured_tools(config, tools or {})

agent = lc_create_agent(
llm,
tools=lc_tools or None,
system_prompt=instructions or None,
)
return LangChainAgentRunner(agent)
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
"""LangGraph agent graph runner for LaunchDarkly AI SDK."""

import operator
import time
from typing import Annotated, Any, List
Expand All @@ -10,7 +8,9 @@
from ldai.providers.types import LDAIMetrics

from ldai_langchain.langchain_helper import (
build_tools,
create_langchain_model,
extract_last_message_content,
get_ai_metrics_from_response,
get_ai_usage_from_response,
get_tool_calls_from_response,
Expand All @@ -20,6 +20,11 @@

class LangGraphAgentGraphRunner(AgentGraphRunner):
"""
CAUTION:
This feature is experimental and should NOT be considered ready for production use.
It may change or be removed without notice and is not subject to backwards
compatibility guarantees.

AgentGraphRunner implementation for LangGraph.

Compiles and runs the agent graph with LangGraph and automatically records
Expand Down Expand Up @@ -73,12 +78,7 @@ def handle_traversal(node: AgentGraphNode, ctx: dict) -> None:
model = None
if node_config.model:
lc_model = create_langchain_model(node_config)
tool_defs = node_config.model.get_parameter('tools') or []
tool_fns = [
tools_ref[t.get('name', '')]
for t in tool_defs
if t.get('name', '') in tools_ref
]
tool_fns = build_tools(node_config, tools_ref)
model = lc_model.bind_tools(tool_fns) if tool_fns else lc_model

def invoke(state: WorkflowState) -> WorkflowState:
Expand Down Expand Up @@ -124,12 +124,8 @@ def invoke(state: WorkflowState) -> WorkflowState:
)
duration = (time.perf_counter_ns() - start_ns) // 1_000_000

output = ''
messages = result.get('messages', [])
if messages:
last = messages[-1]
if hasattr(last, 'content'):
output = str(last.content)
output = extract_last_message_content(messages)
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Shared helper silently drops non-string langgraph output

Low Severity

The extract_last_message_content helper adds an isinstance(last.content, str) check that the langgraph runner previously did not have. The old langgraph code used str(last.content), which would convert any content type (including lists from multimodal responses) to a string. The new shared function returns '' for non-string content, silently dropping output in cases where AIMessage.content is a list (e.g., multimodal or mixed tool-use responses).

Additional Locations (1)
Fix in Cursor Fix in Web

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We will look into this in a follow up PR.


if tracker:
tracker.track_path(exec_path)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -422,7 +422,7 @@ def test_creates_agent_runner_with_instructions_and_tool_definitions(self):

mock_agent = MagicMock()
with patch('ldai_langchain.langchain_runner_factory.create_langchain_model') as mock_create, \
patch('ldai_langchain.langchain_runner_factory.build_structured_tools') as mock_tools, \
patch('ldai_langchain.langchain_runner_factory.build_tools') as mock_tools, \
patch('langchain.agents.create_agent', return_value=mock_agent):
mock_create.return_value = MagicMock()
mock_tools.return_value = [MagicMock()]
Expand All @@ -447,7 +447,7 @@ def test_creates_agent_runner_with_no_tools(self):

mock_agent = MagicMock()
with patch('ldai_langchain.langchain_runner_factory.create_langchain_model') as mock_create, \
patch('ldai_langchain.langchain_runner_factory.build_structured_tools', return_value=[]), \
patch('ldai_langchain.langchain_runner_factory.build_tools', return_value=[]), \
patch('langchain.agents.create_agent', return_value=mock_agent):
mock_create.return_value = MagicMock()

Expand Down
Loading
Loading