
Commit

Merge pull request #6 from steamship-core/doug/timeout-update
add configurable wait time to generate
douglas-reid authored Feb 8, 2023
2 parents f391162 + eeb5c11 commit 0a45bc9
Showing 1 changed file with 4 additions and 1 deletion: src/steamship_langchain/llms/openai.py
@@ -22,6 +22,7 @@ class OpenAI(BaseLLM):
     max_words: int = 256
     n: int = 1
     best_of: int = 1
+    batch_task_timeout_seconds: int = 10 * 60  # 10 minute limit on generation tasks

     @property
     def _llm_type(self) -> str:
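
For reference, `10 * 60` evaluates to 600 seconds, so batched generation tasks get a 10-minute ceiling by default. Because this is an ordinary class field, callers can override it at construction time. A minimal usage sketch (the client setup and the 5-minute value are illustrative, not part of this commit):

from steamship import Steamship
from steamship_langchain.llms.openai import OpenAI

client = Steamship()  # assumes a Steamship workspace/API key is configured in the environment
llm = OpenAI(
    client=client,
    batch_task_timeout_seconds=5 * 60,  # give up on a generation task after 5 minutes
)
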
@@ -106,7 +107,9 @@ def _batch(self, prompts: List[str], stop: Optional[List[str]] = None) -> List[Generation]:
        try:
            prompt_file = File.create(client=self.client, blocks=blocks)
            task = llm_plugin.tag(doc=prompt_file)
-           task.wait()  # TODO(douglas-reid): put in timeout, based on configuration
+           # the llm_plugin handles retries and backoff. this wait()
+           # will allow for that to happen.
+           task.wait(max_timeout_s=self.batch_task_timeout_seconds)
            generation_file = task.output.file

            for text_block in generation_file.blocks:
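The new comment notes that retries and backoff for transient failures live inside llm_plugin, so the timeout only bounds the outer wait. A schematic of that division of labor (an illustrative sketch of a bounded polling wait, not Steamship's actual Task.wait implementation; check_status is a hypothetical callable):

import time

def wait_for_task(check_status, max_timeout_s: float, poll_interval_s: float = 1.0):
    """Poll a task until it finishes, raising if the deadline passes.

    Retry/backoff logic stays inside the plugin; this outer loop only
    enforces the caller's overall wall-clock deadline.
    """
    deadline = time.monotonic() + max_timeout_s
    while time.monotonic() < deadline:
        status = check_status()  # hypothetical: returns "succeeded", "failed", or "running"
        if status in ("succeeded", "failed"):
            return status
        time.sleep(poll_interval_s)
    raise TimeoutError(f"task did not finish within {max_timeout_s} seconds")

The design choice here is that transient-failure handling stays close to the plugin that knows how to retry, while the caller-facing batch_task_timeout_seconds caps total wait time so a stuck task fails loudly instead of blocking the batch indefinitely.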
