Commit 8e7cb3e

chore: Use builtins for list and dict where possible (#186)
* Use builtins for list and dict where possible
* fix linting
1 parent 19830f4 commit 8e7cb3e
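
For context: this commit replaces `typing.List` / `typing.Dict` annotations with the builtin `list` / `dict` generics introduced by PEP 585, which are available on Python 3.9+ (assumed here as the supported floor). A minimal, hypothetical sketch of the pattern (the function names below are illustrative and not taken from the repo):

```python
# Before: container generics had to be imported from typing
from typing import Dict, List


def old_style(urls: List[str]) -> Dict[str, int]:
    return {url: len(url) for url in urls}


# After (PEP 585, Python 3.9+): the builtins are usable as generics directly,
# so the List/Dict imports can be dropped
def new_style(urls: list[str]) -> dict[str, int]:
    return {url: len(url) for url in urls}
```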

9 files changed: +46 −46 lines changed

docs/concepts/pipeline-wrapper.md

Lines changed: 15 additions & 15 deletions
@@ -14,7 +14,7 @@ The pipeline wrapper provides a flexible foundation for deploying Haystack pipel
 
 ```python
 from pathlib import Path
-from typing import List, Generator, Union, AsyncGenerator
+from typing import Generator, Union, AsyncGenerator
 from haystack import Pipeline, AsyncPipeline
 from hayhooks import BasePipelineWrapper, get_last_user_message, streaming_generator, async_streaming_generator
 
@@ -23,7 +23,7 @@ class PipelineWrapper(BasePipelineWrapper):
         pipeline_yaml = (Path(__file__).parent / "pipeline.yml").read_text()
         self.pipeline = Pipeline.loads(pipeline_yaml)
 
-    def run_api(self, urls: List[str], question: str) -> str:
+    def run_api(self, urls: list[str], question: str) -> str:
         result = self.pipeline.run({"fetcher": {"urls": urls}, "prompt": {"query": question}})
         return result["llm"]["replies"][0]
 ```
@@ -108,7 +108,7 @@ def setup(self) -> None:
 The `run_api()` method is called for each API request to the `{pipeline_name}/run` endpoint.
 
 ```python
-def run_api(self, urls: List[str], question: str) -> str:
+def run_api(self, urls: list[str], question: str) -> str:
     result = self.pipeline.run({"fetcher": {"urls": urls}, "prompt": {"query": question}})
     return result["llm"]["replies"][0]
 ```
@@ -123,9 +123,9 @@ def run_api(self, urls: List[str], question: str) -> str:
 **Input argument rules:**
 
 - Arguments must be JSON-serializable
-- Use proper type hints (`List[str]`, `Optional[int]`, etc.)
+- Use proper type hints (`list[str]`, `Optional[int]`, etc.)
 - Default values are supported
-- Complex types like `Dict[str, Any]` are allowed
+- Complex types like `dict[str, Any]` are allowed
 
 ## Optional Methods
 
@@ -134,7 +134,7 @@ def run_api(self, urls: List[str], question: str) -> str:
 The asynchronous version of `run_api()` for better performance under high load.
 
 ```python
-async def run_api_async(self, urls: List[str], question: str) -> str:
+async def run_api_async(self, urls: list[str], question: str) -> str:
     result = await self.pipeline.run_async({"fetcher": {"urls": urls}, "prompt": {"query": question}})
     return result["llm"]["replies"][0]
 ```
@@ -151,7 +151,7 @@ async def run_api_async(self, urls: List[str], question: str) -> str:
 Enable OpenAI-compatible chat endpoints for integration with chat interfaces.
 
 ```python
-def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> Union[str, Generator]:
+def run_chat_completion(self, model: str, messages: list[dict], body: dict) -> Union[str, Generator]:
     question = get_last_user_message(messages)
     result = self.pipeline.run({"prompt": {"query": question}})
     return result["llm"]["replies"][0]
@@ -168,7 +168,7 @@ def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> U
 Async version of chat completion with streaming support.
 
 ```python
-async def run_chat_completion_async(self, model: str, messages: List[dict], body: dict) -> AsyncGenerator:
+async def run_chat_completion_async(self, model: str, messages: list[dict], body: dict) -> AsyncGenerator:
     question = get_last_user_message(messages)
     return async_streaming_generator(
         pipeline=self.pipeline,
@@ -191,7 +191,7 @@ Some Haystack components only support synchronous streaming callbacks and don't
 By default, `async_streaming_generator` requires all streaming components to support async callbacks:
 
 ```python
-async def run_chat_completion_async(self, model: str, messages: List[dict], body: dict) -> AsyncGenerator:
+async def run_chat_completion_async(self, model: str, messages: list[dict], body: dict) -> AsyncGenerator:
     # This will FAIL if pipeline contains OpenAIGenerator
     return async_streaming_generator(
         pipeline=self.pipeline,  # AsyncPipeline with OpenAIGenerator
@@ -369,7 +369,7 @@ class MultiLLMWrapper(BasePipelineWrapper):
         self.pipeline.connect("llm_1.replies", "prompt_2.previous_response")
         self.pipeline.connect("prompt_2.prompt", "llm_2.messages")
 
-    def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> Generator:
+    def run_chat_completion(self, model: str, messages: list[dict], body: dict) -> Generator:
        question = get_last_user_message(messages)
 
         # By default, only llm_2 (the last streaming component) will stream
@@ -386,7 +386,7 @@ class MultiLLMWrapper(BasePipelineWrapper):
 For advanced use cases where you want to see outputs from multiple components, use the `streaming_components` parameter:
 
 ```python
-def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> Generator:
+def run_chat_completion(self, model: str, messages: list[dict], body: dict) -> Generator:
     question = get_last_user_message(messages)
 
     # Enable streaming for BOTH LLMs
@@ -594,9 +594,9 @@ Hayhooks can handle file uploads by adding a `files` parameter:
 
 ```python
 from fastapi import UploadFile
-from typing import Optional, List
+from typing import Optional
 
-def run_api(self, files: Optional[List[UploadFile]] = None, query: str = "") -> str:
+def run_api(self, files: Optional[list[UploadFile]] = None, query: str = "") -> str:
     if files:
         # Process uploaded files
         filenames = [f.filename for f in files if f.filename]
@@ -641,7 +641,7 @@ Your pipeline wrapper may require additional dependencies:
 # pipeline_wrapper.py
 import trafilatura  # Additional dependency
 
-def run_api(self, urls: List[str], question: str) -> str:
+def run_api(self, urls: list[str], question: str) -> str:
     # Use additional library
     content = trafilatura.fetch(urls[0])
     # ... rest of pipeline logic
@@ -709,7 +709,7 @@ class PipelineWrapper(BasePipelineWrapper):
 Use docstrings to provide MCP tool descriptions:
 
 ```python
-def run_api(self, urls: List[str], question: str) -> str:
+def run_api(self, urls: list[str], question: str) -> str:
     """
     Ask questions about website content.

docs/concepts/yaml-pipeline-deployment.md

Lines changed: 3 additions & 3 deletions
@@ -271,17 +271,17 @@ with open("pipeline.yml", "w") as f:
 ```python
 # For OpenAI compatibility
 class PipelineWrapper(BasePipelineWrapper):
-    def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> Union[str, Generator]:
+    def run_chat_completion(self, model: str, messages: list[dict], body: dict) -> Union[str, Generator]:
         ...
 
 # For file uploads
 class PipelineWrapper(BasePipelineWrapper):
-    def run_api(self, files: Optional[List[UploadFile]] = None, query: str = "") -> str:
+    def run_api(self, files: Optional[list[UploadFile]] = None, query: str = "") -> str:
         ...
 
 # For streaming
 class PipelineWrapper(BasePipelineWrapper):
-    def run_chat_completion_async(self, model: str, messages: List[dict], body: dict) -> AsyncGenerator:
+    def run_chat_completion_async(self, model: str, messages: list[dict], body: dict) -> AsyncGenerator:
         ...
 ```

docs/features/file-upload-support.md

Lines changed: 4 additions & 4 deletions
@@ -17,10 +17,10 @@ To accept file uploads in your pipeline, add a `files` parameter to your `run_ap
 
 ```python
 from fastapi import UploadFile
-from typing import Optional, List
+from typing import Optional
 
 class PipelineWrapper(BasePipelineWrapper):
-    def run_api(self, files: Optional[List[UploadFile]] = None, query: str = "") -> str:
+    def run_api(self, files: Optional[list[UploadFile]] = None, query: str = "") -> str:
         if not files:
             return "No files provided"
 
@@ -105,12 +105,12 @@ You can handle both files and parameters in the same request by adding them as a
 
 ```python
 from fastapi import UploadFile
-from typing import Optional, List
+from typing import Optional
 
 class PipelineWrapper(BasePipelineWrapper):
     def run_api(
         self,
-        files: Optional[List[UploadFile]] = None,
+        files: Optional[list[UploadFile]] = None,
         query: str = "",
         additional_param: str = "default"
     ) -> str:

docs/features/mcp-support.md

Lines changed: 3 additions & 4 deletions
@@ -135,7 +135,6 @@ For each deployed pipeline, Hayhooks will:
 
 ```python
 from pathlib import Path
-from typing import List
 from haystack import Pipeline
 from hayhooks import BasePipelineWrapper
 
@@ -145,7 +144,7 @@ class PipelineWrapper(BasePipelineWrapper):
         pipeline_yaml = (Path(__file__).parent / "chat_with_website.yml").read_text()
         self.pipeline = Pipeline.loads(pipeline_yaml)
 
-    def run_api(self, urls: List[str], question: str) -> str:
+    def run_api(self, urls: list[str], question: str) -> str:
         #
         # NOTE: The following docstring will be used as MCP Tool description
         #
@@ -245,7 +244,7 @@ Configure Claude Desktop to connect to Hayhooks MCP Server:
 Use docstrings to provide better tool descriptions:
 
 ```python
-def run_api(self, urls: List[str], question: str) -> str:
+def run_api(self, urls: list[str], question: str) -> str:
     """
     Ask questions about website content using AI.
 
@@ -270,7 +269,7 @@ Hayhooks automatically validates inputs based on your method signature:
 ```python
 def run_api(
     self,
-    urls: List[str],  # Required: List of URLs
+    urls: list[str],  # Required: List of URLs
     question: str,  # Required: User question
     max_tokens: int = 1000  # Optional: Max tokens
 ) -> str:

docs/features/openai-compatibility.md

Lines changed: 11 additions & 10 deletions
@@ -22,7 +22,8 @@ Hayhooks can automatically generate OpenAI-compatible endpoints if you implement
 ### Basic Chat Completion
 
 ```python
-from typing import List, Union, Generator
+from pathlib import Path
+from typing import Union, Generator
 from haystack import Pipeline
 from hayhooks import get_last_user_message, BasePipelineWrapper, log
 
@@ -32,7 +33,7 @@ class PipelineWrapper(BasePipelineWrapper):
         pipeline_yaml = (Path(__file__).parent / "pipeline.yml").read_text()
         self.pipeline = Pipeline.loads(pipeline_yaml)
 
-    def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> Union[str, Generator]:
+    def run_chat_completion(self, model: str, messages: list[dict], body: dict) -> Union[str, Generator]:
         log.trace(f"Running pipeline with model: {model}, messages: {messages}, body: {body}")
 
         question = get_last_user_message(messages)
@@ -55,7 +56,7 @@ class PipelineWrapper(BasePipelineWrapper):
         pipeline_yaml = (Path(__file__).parent / "pipeline.yml").read_text()
         self.pipeline = AsyncPipeline.loads(pipeline_yaml)
 
-    async def run_chat_completion_async(self, model: str, messages: List[dict], body: dict) -> AsyncGenerator:
+    async def run_chat_completion_async(self, model: str, messages: list[dict], body: dict) -> AsyncGenerator:
         log.trace(f"Running pipeline with model: {model}, messages: {messages}, body: {body}")
 
         question = get_last_user_message(messages)
@@ -73,7 +74,7 @@ class PipelineWrapper(BasePipelineWrapper):
 ### run_chat_completion(...)
 
 ```python
-def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> Union[str, Generator]:
+def run_chat_completion(self, model: str, messages: list[dict], body: dict) -> Union[str, Generator]:
     """
     Run the pipeline for OpenAI-compatible chat completion.
 
@@ -91,7 +92,7 @@ def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> U
 ### run_chat_completion_async(...)
 
 ```python
-async def run_chat_completion_async(self, model: str, messages: List[dict], body: dict) -> Union[str, AsyncGenerator]:
+async def run_chat_completion_async(self, model: str, messages: list[dict], body: dict) -> Union[str, AsyncGenerator]:
     """
     Async version of run_chat_completion.
 
@@ -143,7 +144,7 @@ curl http://localhost:1416/v1/models
 ```python
 from hayhooks import streaming_generator
 
-def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> Generator:
+def run_chat_completion(self, model: str, messages: list[dict], body: dict) -> Generator:
     question = get_last_user_message(messages)
 
     return streaming_generator(
@@ -157,7 +158,7 @@ def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> G
 ```python
 from hayhooks import async_streaming_generator
 
-async def run_chat_completion_async(self, model: str, messages: List[dict], body: dict) -> AsyncGenerator:
+async def run_chat_completion_async(self, model: str, messages: list[dict], body: dict) -> AsyncGenerator:
     question = get_last_user_message(messages)
 
     return async_streaming_generator(
@@ -217,7 +218,7 @@ class SyncChatWrapper(BasePipelineWrapper):
         self.pipeline.add_component("llm", llm)
         self.pipeline.connect("chat_prompt_builder.prompt", "llm.messages")
 
-    def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> str:
+    def run_chat_completion(self, model: str, messages: list[dict], body: dict) -> str:
         question = get_last_user_message(messages)
         result = self.pipeline.run({"chat_prompt_builder": {"query": question}})
         return result["llm"]["replies"][0].content
@@ -241,7 +242,7 @@ class AsyncStreamingWrapper(BasePipelineWrapper):
         self.pipeline.add_component("llm", llm)
         self.pipeline.connect("chat_prompt_builder.prompt", "llm.messages")
 
-    async def run_chat_completion_async(self, model: str, messages: List[dict], body: dict) -> AsyncGenerator:
+    async def run_chat_completion_async(self, model: str, messages: list[dict], body: dict) -> AsyncGenerator:
         question = get_last_user_message(messages)
         return async_streaming_generator(
             pipeline=self.pipeline,
@@ -254,7 +255,7 @@ class AsyncStreamingWrapper(BasePipelineWrapper):
 The OpenAI-compatible endpoints support standard parameters from the `body` argument:
 
 ```python
-def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> str:
+def run_chat_completion(self, model: str, messages: list[dict], body: dict) -> str:
     # Access additional parameters
     temperature = body.get("temperature", 0.7)
     max_tokens = body.get("max_tokens", 150)

docs/features/openwebui-integration.md

Lines changed: 5 additions & 5 deletions
@@ -107,12 +107,12 @@ Hayhooks supports sending events to Open WebUI for enhanced user experience:
 ### Event Implementation
 
 ```python
-from typing import AsyncGenerator, List
+from typing import AsyncGenerator
 from hayhooks import async_streaming_generator, get_last_user_message, BasePipelineWrapper
 from hayhooks.open_webui import create_status_event, create_message_event, OpenWebUIEvent
 
 class PipelineWrapper(BasePipelineWrapper):
-    async def run_chat_completion_async(self, model: str, messages: List[dict], body: dict) -> AsyncGenerator[str | OpenWebUIEvent, None]:
+    async def run_chat_completion_async(self, model: str, messages: list[dict], body: dict) -> AsyncGenerator[str | OpenWebUIEvent, None]:
         # Indicate loading
         yield create_status_event("Processing your request...", done=False)
 
@@ -158,7 +158,7 @@ def on_tool_call_end(tool_name: str, arguments: dict, result: dict, error: bool)
 
 
 class PipelineWrapper(BasePipelineWrapper):
-    def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> Generator:
+    def run_chat_completion(self, model: str, messages: list[dict], body: dict) -> Generator:
         return streaming_generator(
             pipeline=self.pipeline,
             pipeline_run_args={"messages": messages},
@@ -250,7 +250,7 @@ Here's a video example of how to deploy a Haystack pipeline from the `open-webui
 Here's a complete example for a website chat pipeline:
 
 ```python
-from typing import AsyncGenerator, List
+from typing import AsyncGenerator
 from haystack import Pipeline
 from haystack.components.fetchers import LinkContentFetcher
 from haystack.components.converters import HTMLToDocument
@@ -282,7 +282,7 @@ class PipelineWrapper(BasePipelineWrapper):
         self.pipeline.connect("converter.documents", "chat_prompt_builder.documents")
         self.pipeline.connect("chat_prompt_builder.prompt", "llm.messages")
 
-    async def run_chat_completion_async(self, model: str, messages: List[dict], body: dict) -> AsyncGenerator:
+    async def run_chat_completion_async(self, model: str, messages: list[dict], body: dict) -> AsyncGenerator:
         question = get_last_user_message(messages)
 
         # Extract URLs from messages or use defaults

examples/pipeline_wrappers/multi_llm_streaming/pipeline_wrapper.py

Lines changed: 2 additions & 2 deletions
@@ -1,5 +1,5 @@
 from collections.abc import Generator
-from typing import Any, List, Union  # noqa: UP035
+from typing import Any, Union
 
 from haystack import Pipeline
 from haystack.components.builders import ChatPromptBuilder
@@ -92,7 +92,7 @@ def run_api(self, query: str) -> dict[str, Any]:
         )
         return {"reply": result["llm_2"]["replies"][0].text if result["llm_2"]["replies"] else ""}
 
-    def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> Union[str, Generator]:  # noqa: ARG002, UP006
+    def run_chat_completion(self, model: str, messages: list[dict], body: dict) -> Union[str, Generator]:  # noqa: ARG002
        """
        Run the pipeline in streaming mode.

src/hayhooks/callbacks.py

Lines changed: 2 additions & 2 deletions
@@ -48,12 +48,12 @@ def default_on_tool_call_end(
 
     Args:
         tool_name (str): The name of the tool that was called.
-        arguments (Dict[str, Any]): The arguments that were passed to the tool.
+        arguments (dict[str, Any]): The arguments that were passed to the tool.
         result (str): The result or response from the tool execution.
         error (bool): Whether the tool call resulted in an error.
 
     Returns:
-        List[Union[OpenWebUIEvent, str]]: A list of events to be processed by Open WebUI.
+        list[Union[OpenWebUIEvent, str]]: A list of events to be processed by Open WebUI.
             For successful calls, returns a status event and a details tag with the tool's arguments and response.
             For failed calls, returns a hidden status event and an error notification.
             The list can contain both OpenWebUIEvent and str objects.

src/hayhooks/server/routers/deploy.py

Lines changed: 1 addition & 1 deletion
@@ -17,7 +17,7 @@
 
 SAMPLE_PIPELINE_FILES = {
     "pipeline_wrapper.py": (
-        "from typing import Dict, Any\n\ndef process(data: Dict[str, Any]) -> Dict[str, Any]:\n    "
+        "from typing import Any\n\ndef process(data: dict[str, Any]) -> dict[str, Any]:\n    "
         "# Your processing logic here\n    return data"
     ),
     "requirements.txt": "pandas==1.3.5\nnumpy==1.21.0",
