@@ -14,7 +14,7 @@ The pipeline wrapper provides a flexible foundation for deploying Haystack pipelines

```python
from pathlib import Path
-from typing import List, Generator, Union, AsyncGenerator
+from typing import Generator, Union, AsyncGenerator
from haystack import Pipeline, AsyncPipeline
from hayhooks import BasePipelineWrapper, get_last_user_message, streaming_generator, async_streaming_generator

@@ -23,7 +23,7 @@ class PipelineWrapper(BasePipelineWrapper):
        pipeline_yaml = (Path(__file__).parent / "pipeline.yml").read_text()
        self.pipeline = Pipeline.loads(pipeline_yaml)

-    def run_api(self, urls: List[str], question: str) -> str:
+    def run_api(self, urls: list[str], question: str) -> str:
        result = self.pipeline.run({"fetcher": {"urls": urls}, "prompt": {"query": question}})
        return result["llm"]["replies"][0]
```
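An editor's aside, not part of the diff: `setup()` can equally build the pipeline in code instead of loading YAML. The component classes and connections below are a plausible reconstruction for illustration, not the actual contents of `pipeline.yml`:

```python
from haystack import Pipeline
from haystack.components.builders import PromptBuilder
from haystack.components.converters import HTMLToDocument
from haystack.components.fetchers import LinkContentFetcher
from haystack.components.generators import OpenAIGenerator

def setup(self) -> None:
    # Mirrors the component names ("fetcher", "prompt", "llm") that run_api()
    # above addresses; "converter" bridges fetched streams to documents the
    # prompt template can render.
    pipe = Pipeline()
    pipe.add_component("fetcher", LinkContentFetcher())
    pipe.add_component("converter", HTMLToDocument())
    pipe.add_component(
        "prompt",
        PromptBuilder(template="Context: {{ documents }}\nQuestion: {{ query }}"),
    )
    pipe.add_component("llm", OpenAIGenerator())
    pipe.connect("fetcher.streams", "converter.sources")
    pipe.connect("converter.documents", "prompt.documents")
    pipe.connect("prompt.prompt", "llm.prompt")
    self.pipeline = pipe
```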
@@ -108,7 +108,7 @@ def setup(self) -> None:
The `run_api()` method is called for each API request to the `{pipeline_name}/run` endpoint.

```python
-def run_api(self, urls: List[str], question: str) -> str:
+def run_api(self, urls: list[str], question: str) -> str:
    result = self.pipeline.run({"fetcher": {"urls": urls}, "prompt": {"query": question}})
    return result["llm"]["replies"][0]
```
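For orientation (an editor's addition): once deployed, this method is reachable with a plain HTTP call. The pipeline name `chat_with_website` and the server address are illustrative assumptions:

```python
import requests

# POST to {pipeline_name}/run with a JSON body matching run_api()'s signature.
resp = requests.post(
    "http://localhost:1416/chat_with_website/run",  # assumed host/port
    json={
        "urls": ["https://haystack.deepset.ai"],
        "question": "What is Haystack?",
    },
)
resp.raise_for_status()
print(resp.json())  # wraps the string returned by run_api()
```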
@@ -123,9 +123,9 @@ def run_api(self, urls: List[str], question: str) -> str:
**Input argument rules:**

- Arguments must be JSON-serializable
-- Use proper type hints (`List[str]`, `Optional[int]`, etc.)
+- Use proper type hints (`list[str]`, `Optional[int]`, etc.)
- Default values are supported
-- Complex types like `Dict[str, Any]` are allowed
+- Complex types like `dict[str, Any]` are allowed
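A hypothetical signature (an editor's addition) illustrating all four rules at once; the parameter names and defaults are invented for the example:

```python
from typing import Any, Optional

def run_api(
    self,
    urls: list[str],                            # built-in generic hint
    question: str = "Summarize the page",       # default value
    top_k: Optional[int] = None,                # optional scalar
    metadata: Optional[dict[str, Any]] = None,  # complex JSON-serializable type
) -> str:
    ...
```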
## Optional Methods

@@ -134,7 +134,7 @@ def run_api(self, urls: List[str], question: str) -> str:
The asynchronous version of `run_api()` for better performance under high load.

```python
-async def run_api_async(self, urls: List[str], question: str) -> str:
+async def run_api_async(self, urls: list[str], question: str) -> str:
    result = await self.pipeline.run_async({"fetcher": {"urls": urls}, "prompt": {"query": question}})
    return result["llm"]["replies"][0]
```
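An editor's note: `run_async()` lives on Haystack's `AsyncPipeline` (already imported at the top of the wrapper), so this method assumes `setup()` loaded one. A minimal sketch, assuming the same `pipeline.yml`:

```python
from pathlib import Path

from haystack import AsyncPipeline

def setup(self) -> None:
    # AsyncPipeline exposes run_async(); the YAML definition is unchanged.
    pipeline_yaml = (Path(__file__).parent / "pipeline.yml").read_text()
    self.pipeline = AsyncPipeline.loads(pipeline_yaml)
```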
@@ -151,7 +151,7 @@ async def run_api_async(self, urls: List[str], question: str) -> str:
Enable OpenAI-compatible chat endpoints for integration with chat interfaces.

```python
-def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> Union[str, Generator]:
+def run_chat_completion(self, model: str, messages: list[dict], body: dict) -> Union[str, Generator]:
    question = get_last_user_message(messages)
    result = self.pipeline.run({"prompt": {"query": question}})
    return result["llm"]["replies"][0]
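# --- Editor's sketch, not part of the diff: exercising the OpenAI-compatible
# endpoint once this wrapper is deployed. The base URL, the "/v1" prefix, and
# the pipeline name are assumptions; the deployed pipeline name is assumed to
# act as the model id.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:1416/v1", api_key="unused")
chat = client.chat.completions.create(
    model="chat_with_website",
    messages=[{"role": "user", "content": "What is Haystack?"}],
)
print(chat.choices[0].message.content)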
@@ -168,7 +168,7 @@ def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> Union[str, Generator]:
Async version of chat completion with streaming support.

```python
-async def run_chat_completion_async(self, model: str, messages: List[dict], body: dict) -> AsyncGenerator:
+async def run_chat_completion_async(self, model: str, messages: list[dict], body: dict) -> AsyncGenerator:
    question = get_last_user_message(messages)
    return async_streaming_generator(
        pipeline=self.pipeline,
@@ -191,7 +191,7 @@ Some Haystack components only support synchronous streaming callbacks and don't
By default, `async_streaming_generator` requires all streaming components to support async callbacks:

```python
-async def run_chat_completion_async(self, model: str, messages: List[dict], body: dict) -> AsyncGenerator:
+async def run_chat_completion_async(self, model: str, messages: list[dict], body: dict) -> AsyncGenerator:
    # This will FAIL if pipeline contains OpenAIGenerator
    return async_streaming_generator(
        pipeline=self.pipeline,  # AsyncPipeline with OpenAIGenerator
@@ -369,7 +369,7 @@ class MultiLLMWrapper(BasePipelineWrapper):
        self.pipeline.connect("llm_1.replies", "prompt_2.previous_response")
        self.pipeline.connect("prompt_2.prompt", "llm_2.messages")

-    def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> Generator:
+    def run_chat_completion(self, model: str, messages: list[dict], body: dict) -> Generator:
        question = get_last_user_message(messages)

        # By default, only llm_2 (the last streaming component) will stream
@@ -386,7 +386,7 @@ class MultiLLMWrapper(BasePipelineWrapper):
For advanced use cases where you want to see outputs from multiple components, use the `streaming_components` parameter:

```python
-def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> Generator:
+def run_chat_completion(self, model: str, messages: list[dict], body: dict) -> Generator:
    question = get_last_user_message(messages)

    # Enable streaming for BOTH LLMs
@@ -594,9 +594,9 @@ Hayhooks can handle file uploads by adding a `files` parameter:

```python
from fastapi import UploadFile
-from typing import Optional, List
+from typing import Optional

-def run_api(self, files: Optional[List[UploadFile]] = None, query: str = "") -> str:
+def run_api(self, files: Optional[list[UploadFile]] = None, query: str = "") -> str:
    if files:
        # Process uploaded files
        filenames = [f.filename for f in files if f.filename]
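# --- Editor's sketch, not part of the diff: a client-side upload against this
# endpoint, assuming Hayhooks accepts multipart/form-data with repeated "files"
# fields plus ordinary form fields. Names and address are illustrative.
import requests

with open("notes.txt", "rb") as fh:
    resp = requests.post(
        "http://localhost:1416/my_pipeline/run",
        files=[("files", ("notes.txt", fh, "text/plain"))],
        data={"query": "What do the notes say?"},
    )
print(resp.status_code)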
@@ -641,7 +641,7 @@ Your pipeline wrapper may require additional dependencies:
# pipeline_wrapper.py
import trafilatura  # Additional dependency

-def run_api(self, urls: List[str], question: str) -> str:
+def run_api(self, urls: list[str], question: str) -> str:
    # Use additional library
    content = trafilatura.fetch_url(urls[0])
    # ... rest of pipeline logic
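# --- Editor's aside: because the server imports pipeline_wrapper.py when the
# pipeline is deployed, a guarded import (in place of the bare one above)
# fails with a clearer message when the extra dependency is missing on the
# server. Plain Python, no Hayhooks-specific API assumed.
try:
    import trafilatura
except ImportError as exc:
    raise ImportError(
        "trafilatura is required by this wrapper; install it in the "
        "Hayhooks server environment first"
    ) from exc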
@@ -709,7 +709,7 @@ class PipelineWrapper(BasePipelineWrapper):
Use docstrings to provide MCP tool descriptions:

```python
-def run_api(self, urls: List[str], question: str) -> str:
+def run_api(self, urls: list[str], question: str) -> str:
    """
    Ask questions about website content.