Skip to content

Commit 4bef65c

Browse files
committed
feat: Support additional create methods for agent and agent_graph
feat!: Rename AIProviderFactory → RunnerFactory
feat!: Rename OpenAIProvider → OpenAIRunnerFactory (import from ldai_openai.openai_runner_factory)
feat!: Rename LangChainProvider → LangChainRunnerFactory (import from ldai_langchain.langchain_runner_factory)
feat: Add create_model(), create_agent(), create_agent_graph() to AIProvider ABC (non-abstract, default warns)
1 parent 397d2d4 commit 4bef65c

File tree

11 files changed

+363
-274
lines changed

11 files changed

+363
-274
lines changed
Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,10 @@
1-
"""LaunchDarkly AI SDK - LangChain Provider.
1+
"""LaunchDarkly AI SDK - LangChain Connector."""
22

3-
This package provides LangChain integration for the LaunchDarkly Server-Side AI SDK,
4-
"""
5-
6-
from ldai_langchain.langchain_provider import LangChainProvider
3+
from ldai_langchain.langchain_runner_factory import LangChainRunnerFactory
74

85
__version__ = "0.1.0"
96

107
__all__ = [
118
'__version__',
12-
'LangChainProvider',
9+
'LangChainRunnerFactory',
1310
]

packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py renamed to packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py

Lines changed: 48 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
"""LangChain implementation of AIProvider for LaunchDarkly AI SDK."""
1+
"""LangChain connector for LaunchDarkly AI SDK."""
22

33
from typing import Any, Dict, List, Optional, Union
44

@@ -11,31 +11,46 @@
1111
from ldai.tracker import TokenUsage
1212

1313

14-
class LangChainProvider(AIProvider):
14+
class LangChainRunnerFactory(AIProvider):
1515
"""
16-
LangChain implementation of AIProvider.
17-
18-
This provider integrates LangChain models with LaunchDarkly's tracking capabilities.
16+
LangChain connector for the LaunchDarkly AI SDK.
17+
18+
Can be used in two ways:
19+
- Transparently via ExecutorFactory (pass ``default_ai_provider='langchain'`` to
20+
``create_model()`` / ``create_chat()``).
21+
- Directly for full control: instantiate with a ``BaseChatModel``, then call
22+
``invoke_model()`` yourself and use the static convenience methods
23+
(``get_ai_metrics_from_response``, ``convert_messages_to_langchain``,
24+
``map_provider``, ``create_langchain_model``).
1925
"""
2026

21-
def __init__(self, llm: BaseChatModel):
27+
def __init__(self, llm: Optional[BaseChatModel] = None):
2228
"""
23-
Initialize the LangChain provider.
29+
Initialize the LangChain connector.
30+
31+
When called with no arguments the connector acts as a per-provider factory
32+
— call ``create_model(config)`` to obtain a configured instance.
2433
25-
:param llm: A LangChain BaseChatModel instance
34+
When called with an explicit ``llm`` the connector is ready to invoke
35+
the model immediately.
36+
37+
:param llm: A LangChain BaseChatModel instance (optional)
2638
"""
2739
self._llm = llm
2840

29-
@staticmethod
30-
async def create(ai_config: AIConfigKind) -> 'LangChainProvider':
41+
# --- AIProvider factory methods ---
42+
43+
def create_model(self, config: AIConfigKind) -> 'LangChainRunnerFactory':
3144
"""
32-
Static factory method to create a LangChain AIProvider from an AI configuration.
45+
Create a configured LangChain model connector for the given AI config.
3346
34-
:param ai_config: The LaunchDarkly AI configuration
35-
:return: Configured LangChainProvider instance
47+
:param config: The LaunchDarkly AI configuration
48+
:return: Configured LangChainRunnerFactory ready to invoke the model
3649
"""
37-
llm = LangChainProvider.create_langchain_model(ai_config)
38-
return LangChainProvider(llm)
50+
llm = LangChainRunnerFactory.create_langchain_model(config)
51+
return LangChainRunnerFactory(llm)
52+
53+
# --- Model invocation ---
3954

4055
async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse:
4156
"""
@@ -45,9 +60,9 @@ async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse:
4560
:return: ChatResponse containing the model's response and metrics
4661
"""
4762
try:
48-
langchain_messages = LangChainProvider.convert_messages_to_langchain(messages)
63+
langchain_messages = LangChainRunnerFactory.convert_messages_to_langchain(messages)
4964
response: BaseMessage = await self._llm.ainvoke(langchain_messages)
50-
metrics = LangChainProvider.get_ai_metrics_from_response(response)
65+
metrics = LangChainRunnerFactory.get_ai_metrics_from_response(response)
5166

5267
content: str = ''
5368
if isinstance(response.content, str):
@@ -84,7 +99,7 @@ async def invoke_structured_model(
8499
:return: StructuredResponse containing the structured data
85100
"""
86101
try:
87-
langchain_messages = LangChainProvider.convert_messages_to_langchain(messages)
102+
langchain_messages = LangChainRunnerFactory.convert_messages_to_langchain(messages)
88103
structured_llm = self._llm.with_structured_output(response_structure)
89104
response = await structured_llm.ainvoke(langchain_messages)
90105

@@ -122,11 +137,13 @@ async def invoke_structured_model(
122137
),
123138
)
124139

125-
def get_chat_model(self) -> BaseChatModel:
140+
# --- Convenience accessors ---
141+
142+
def get_chat_model(self) -> Optional[BaseChatModel]:
126143
"""
127144
Get the underlying LangChain model instance.
128145
129-
:return: The underlying BaseChatModel
146+
:return: The underlying BaseChatModel, or None if not yet configured
130147
"""
131148
return self._llm
132149

@@ -135,9 +152,6 @@ def map_provider(ld_provider_name: str) -> str:
135152
"""
136153
Map LaunchDarkly provider names to LangChain provider names.
137154
138-
This method enables seamless integration between LaunchDarkly's standardized
139-
provider naming and LangChain's naming conventions.
140-
141155
:param ld_provider_name: LaunchDarkly provider name
142156
:return: LangChain-compatible provider name
143157
"""
@@ -152,25 +166,24 @@ def map_provider(ld_provider_name: str) -> str:
152166
@staticmethod
153167
def get_ai_metrics_from_response(response: BaseMessage) -> LDAIMetrics:
154168
"""
155-
Get AI metrics from a LangChain provider response.
156-
157-
This method extracts token usage information and success status from LangChain responses
158-
and returns a LaunchDarkly AIMetrics object.
169+
Extract LaunchDarkly AI metrics from a LangChain response.
159170
160171
:param response: The response from the LangChain model
161172
:return: LDAIMetrics with success status and token usage
162173
163-
Example:
164-
# Use with tracker.track_metrics_of for automatic tracking
174+
Example::
175+
165176
response = await tracker.track_metrics_of(
166177
lambda: llm.ainvoke(messages),
167-
LangChainProvider.get_ai_metrics_from_response
178+
LangChainRunnerFactory.get_ai_metrics_from_response
168179
)
169180
"""
170-
# Extract token usage if available
171181
usage: Optional[TokenUsage] = None
172182
if hasattr(response, 'response_metadata') and response.response_metadata:
173-
token_usage = response.response_metadata.get('tokenUsage') or response.response_metadata.get('token_usage')
183+
token_usage = (
184+
response.response_metadata.get('tokenUsage')
185+
or response.response_metadata.get('token_usage')
186+
)
174187
if token_usage:
175188
usage = TokenUsage(
176189
total=token_usage.get('totalTokens', 0) or token_usage.get('total_tokens', 0),
@@ -187,9 +200,6 @@ def convert_messages_to_langchain(
187200
"""
188201
Convert LaunchDarkly messages to LangChain messages.
189202
190-
This helper method enables developers to work directly with LangChain message types
191-
while maintaining compatibility with LaunchDarkly's standardized message format.
192-
193203
:param messages: List of LDMessage objects
194204
:return: List of LangChain message objects
195205
:raises ValueError: If an unsupported message role is encountered
@@ -211,10 +221,7 @@ def convert_messages_to_langchain(
211221
@staticmethod
212222
def create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel:
213223
"""
214-
Create a LangChain model from an AI configuration.
215-
216-
This public helper method enables developers to initialize their own LangChain models
217-
using LaunchDarkly AI configurations.
224+
Create a LangChain model from a LaunchDarkly AI configuration.
218225
219226
:param ai_config: The LaunchDarkly AI configuration
220227
:return: A configured LangChain BaseChatModel
@@ -231,6 +238,7 @@ def create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel:
231238

232239
return init_chat_model(
233240
model_name,
234-
model_provider=LangChainProvider.map_provider(provider),
241+
model_provider=LangChainRunnerFactory.map_provider(provider),
235242
**parameters,
236243
)
244+

packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py

Lines changed: 22 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77

88
from ldai import LDMessage
99

10-
from ldai_langchain import LangChainProvider
10+
from ldai_langchain import LangChainRunnerFactory
1111

1212

1313
class TestConvertMessagesToLangchain:
@@ -16,7 +16,7 @@ class TestConvertMessagesToLangchain:
1616
def test_converts_system_messages_to_system_message(self):
1717
"""Should convert system messages to SystemMessage."""
1818
messages = [LDMessage(role='system', content='You are a helpful assistant.')]
19-
result = LangChainProvider.convert_messages_to_langchain(messages)
19+
result = LangChainRunnerFactory.convert_messages_to_langchain(messages)
2020

2121
assert len(result) == 1
2222
assert isinstance(result[0], SystemMessage)
@@ -25,7 +25,7 @@ def test_converts_system_messages_to_system_message(self):
2525
def test_converts_user_messages_to_human_message(self):
2626
"""Should convert user messages to HumanMessage."""
2727
messages = [LDMessage(role='user', content='Hello, how are you?')]
28-
result = LangChainProvider.convert_messages_to_langchain(messages)
28+
result = LangChainRunnerFactory.convert_messages_to_langchain(messages)
2929

3030
assert len(result) == 1
3131
assert isinstance(result[0], HumanMessage)
@@ -34,7 +34,7 @@ def test_converts_user_messages_to_human_message(self):
3434
def test_converts_assistant_messages_to_ai_message(self):
3535
"""Should convert assistant messages to AIMessage."""
3636
messages = [LDMessage(role='assistant', content='I am doing well, thank you!')]
37-
result = LangChainProvider.convert_messages_to_langchain(messages)
37+
result = LangChainRunnerFactory.convert_messages_to_langchain(messages)
3838

3939
assert len(result) == 1
4040
assert isinstance(result[0], AIMessage)
@@ -47,7 +47,7 @@ def test_converts_multiple_messages_in_order(self):
4747
LDMessage(role='user', content='What is the weather like?'),
4848
LDMessage(role='assistant', content='I cannot check the weather.'),
4949
]
50-
result = LangChainProvider.convert_messages_to_langchain(messages)
50+
result = LangChainRunnerFactory.convert_messages_to_langchain(messages)
5151

5252
assert len(result) == 3
5353
assert isinstance(result[0], SystemMessage)
@@ -62,11 +62,11 @@ class MockMessage:
6262
content = 'Test message'
6363

6464
with pytest.raises(ValueError, match='Unsupported message role: unknown'):
65-
LangChainProvider.convert_messages_to_langchain([MockMessage()]) # type: ignore
65+
LangChainRunnerFactory.convert_messages_to_langchain([MockMessage()]) # type: ignore
6666

6767
def test_handles_empty_message_array(self):
6868
"""Should handle empty message array."""
69-
result = LangChainProvider.convert_messages_to_langchain([])
69+
result = LangChainRunnerFactory.convert_messages_to_langchain([])
7070
assert len(result) == 0
7171

7272

@@ -84,7 +84,7 @@ def test_creates_metrics_with_success_true_and_token_usage(self):
8484
},
8585
}
8686

87-
result = LangChainProvider.get_ai_metrics_from_response(mock_response)
87+
result = LangChainRunnerFactory.get_ai_metrics_from_response(mock_response)
8888

8989
assert result.success is True
9090
assert result.usage is not None
@@ -103,7 +103,7 @@ def test_creates_metrics_with_snake_case_token_usage(self):
103103
},
104104
}
105105

106-
result = LangChainProvider.get_ai_metrics_from_response(mock_response)
106+
result = LangChainRunnerFactory.get_ai_metrics_from_response(mock_response)
107107

108108
assert result.success is True
109109
assert result.usage is not None
@@ -115,7 +115,7 @@ def test_creates_metrics_with_success_true_and_no_usage_when_metadata_missing(se
115115
"""Should create metrics with success=True and no usage when metadata is missing."""
116116
mock_response = AIMessage(content='Test response')
117117

118-
result = LangChainProvider.get_ai_metrics_from_response(mock_response)
118+
result = LangChainRunnerFactory.get_ai_metrics_from_response(mock_response)
119119

120120
assert result.success is True
121121
assert result.usage is None
@@ -126,15 +126,15 @@ class TestMapProvider:
126126

127127
def test_maps_gemini_to_google_genai(self):
128128
"""Should map gemini to google-genai."""
129-
assert LangChainProvider.map_provider('gemini') == 'google-genai'
130-
assert LangChainProvider.map_provider('Gemini') == 'google-genai'
131-
assert LangChainProvider.map_provider('GEMINI') == 'google-genai'
129+
assert LangChainRunnerFactory.map_provider('gemini') == 'google-genai'
130+
assert LangChainRunnerFactory.map_provider('Gemini') == 'google-genai'
131+
assert LangChainRunnerFactory.map_provider('GEMINI') == 'google-genai'
132132

133133
def test_returns_provider_name_unchanged_for_unmapped_providers(self):
134134
"""Should return provider name unchanged for unmapped providers."""
135-
assert LangChainProvider.map_provider('openai') == 'openai'
136-
assert LangChainProvider.map_provider('anthropic') == 'anthropic'
137-
assert LangChainProvider.map_provider('unknown') == 'unknown'
135+
assert LangChainRunnerFactory.map_provider('openai') == 'openai'
136+
assert LangChainRunnerFactory.map_provider('anthropic') == 'anthropic'
137+
assert LangChainRunnerFactory.map_provider('unknown') == 'unknown'
138138

139139

140140
class TestInvokeModel:
@@ -150,7 +150,7 @@ async def test_returns_success_true_for_string_content(self, mock_llm):
150150
"""Should return success=True for string content."""
151151
mock_response = AIMessage(content='Test response')
152152
mock_llm.ainvoke = AsyncMock(return_value=mock_response)
153-
provider = LangChainProvider(mock_llm)
153+
provider = LangChainRunnerFactory(mock_llm)
154154

155155
messages = [LDMessage(role='user', content='Hello')]
156156
result = await provider.invoke_model(messages)
@@ -163,7 +163,7 @@ async def test_returns_success_false_for_non_string_content_and_logs_warning(sel
163163
"""Should return success=False for non-string content and log warning."""
164164
mock_response = AIMessage(content=[{'type': 'image', 'data': 'base64data'}])
165165
mock_llm.ainvoke = AsyncMock(return_value=mock_response)
166-
provider = LangChainProvider(mock_llm)
166+
provider = LangChainRunnerFactory(mock_llm)
167167

168168
messages = [LDMessage(role='user', content='Hello')]
169169
result = await provider.invoke_model(messages)
@@ -176,7 +176,7 @@ async def test_returns_success_false_when_model_invocation_throws_error(self, mo
176176
"""Should return success=False when model invocation throws an error."""
177177
error = Exception('Model invocation failed')
178178
mock_llm.ainvoke = AsyncMock(side_effect=error)
179-
provider = LangChainProvider(mock_llm)
179+
provider = LangChainRunnerFactory(mock_llm)
180180

181181
messages = [LDMessage(role='user', content='Hello')]
182182
result = await provider.invoke_model(messages)
@@ -201,7 +201,7 @@ async def test_returns_success_true_for_successful_invocation(self, mock_llm):
201201
mock_structured_llm = MagicMock()
202202
mock_structured_llm.ainvoke = AsyncMock(return_value=mock_response)
203203
mock_llm.with_structured_output = MagicMock(return_value=mock_structured_llm)
204-
provider = LangChainProvider(mock_llm)
204+
provider = LangChainRunnerFactory(mock_llm)
205205

206206
messages = [LDMessage(role='user', content='Hello')]
207207
response_structure = {'type': 'object', 'properties': {}}
@@ -217,7 +217,7 @@ async def test_returns_success_false_when_structured_model_invocation_throws_err
217217
mock_structured_llm = MagicMock()
218218
mock_structured_llm.ainvoke = AsyncMock(side_effect=error)
219219
mock_llm.with_structured_output = MagicMock(return_value=mock_structured_llm)
220-
provider = LangChainProvider(mock_llm)
220+
provider = LangChainRunnerFactory(mock_llm)
221221

222222
messages = [LDMessage(role='user', content='Hello')]
223223
response_structure = {'type': 'object', 'properties': {}}
@@ -236,7 +236,7 @@ class TestGetChatModel:
236236
def test_returns_underlying_llm(self):
237237
"""Should return the underlying LLM."""
238238
mock_llm = MagicMock()
239-
provider = LangChainProvider(mock_llm)
239+
provider = LangChainRunnerFactory(mock_llm)
240240

241241
assert provider.get_chat_model() is mock_llm
242242

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,7 @@
1-
"""LaunchDarkly AI SDK OpenAI Provider."""
1+
"""LaunchDarkly AI SDK OpenAI Connector."""
22

3-
from ldai_openai.openai_provider import OpenAIProvider
3+
from ldai_openai.openai_runner_factory import OpenAIProvider
44

5-
__all__ = ['OpenAIProvider']
5+
__all__ = [
6+
'OpenAIProvider',
7+
]

0 commit comments

Comments
 (0)