Update README.md #167

GitHub Actions / Production Test Results failed Feb 14, 2024 in 0s

42 tests run, 27 passed, 2 skipped, 13 failed.

Annotations

Check failure on line 22 in tests/chat_models/test_openai.py

test_openai.test_chat_openai

steamship.base.error.SteamshipError: [ERROR - POST /determineOutputBlockTypes] Model may not be overridden in options
Raw output
client = Steamship(config=Configuration(api_key=SecretStr('**********'), api_base=AnyHttpUrl('https://api.steamship.com/api/v1/...space_id='44A267E2-C142-471E-9BC3-2E4891D68CEE', workspace_handle='prime-land-zp1x9', profile='test', request_id=None))

    @pytest.mark.usefixtures("client")
    def test_chat_openai(client: Steamship) -> None:
        """Test ChatOpenAI wrapper."""
        chat = ChatOpenAI(client=client, max_tokens=10)
        message = HumanMessage(content="Hello")
>       response = chat([message])

tests/chat_models/test_openai.py:22: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langchain/chat_models/base.py:195: in __call__
    generation = self.generate(
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langchain/chat_models/base.py:95: in generate
    raise e
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langchain/chat_models/base.py:87: in generate
    results = [
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langchain/chat_models/base.py:88: in <listcomp>
    self._generate(m, stop=stop, run_manager=run_manager, **kwargs)
src/steamship_langchain/chat_models/openai.py:223: in _generate
    messages = self._complete(messages=message_dicts, **params)
src/steamship_langchain/chat_models/openai.py:202: in _complete
    generate_task.wait()
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/steamship/base/tasks.py:268: in wait
    self.refresh()
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/steamship/base/tasks.py:313: in refresh
    resp = self.client.post("task/status", payload=req, expect=self.expect)
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/steamship/base/client.py:579: in post
    return self.call(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Steamship(config=Configuration(api_key=SecretStr('**********'), api_base=AnyHttpUrl('https://api.steamship.com/api/v1/...space_id='44A267E2-C142-471E-9BC3-2E4891D68CEE', workspace_handle='prime-land-zp1x9', profile='test', request_id=None))
verb = <Verb.POST: 'POST'>, operation = 'task/status'
payload = TaskStatusRequest(task_id='119526DB-38B3-488D-A305-91165AA493DF')
file = None
expect = <class 'steamship.data.operations.generator.GenerateResponse'>
debug = False, raw_response = False, is_package_call = False
package_owner = None, package_id = None, package_instance_id = None
as_background_task = False, wait_on_tasks = None, timeout_s = None
task_delay_ms = None

    def call(  # noqa: C901
        self,
        verb: Verb,
        operation: str,
        payload: Union[Request, dict, bytes] = None,
        file: Any = None,
        expect: Type[T] = None,
        debug: bool = False,
        raw_response: bool = False,
        is_package_call: bool = False,
        package_owner: str = None,
        package_id: str = None,
        package_instance_id: str = None,
        as_background_task: bool = False,
        wait_on_tasks: List[Union[str, Task]] = None,
        timeout_s: Optional[float] = None,
        task_delay_ms: Optional[int] = None,
    ) -> Union[
        Any, Task
    ]:  # TODO (enias): I would like to list all possible return types using interfaces instead of Any
        """Post to the Steamship API.
    
        All responses have the format::
    
        .. code-block:: json
    
           {
               "data": "<actual response>",
               "error": {"reason": "<message>"}
           } # noqa: RST203
    
        For the Python client we return the contents of the `data` field if present, and we raise an exception
        if the `error` field is filled in.
        """
        # TODO (enias): Review this codebase
        url = self._url(
            is_package_call=is_package_call,
            package_owner=package_owner,
            operation=operation,
        )
    
        headers = self._headers(
            is_package_call=is_package_call,
            package_owner=package_owner,
            package_id=package_id,
            package_instance_id=package_instance_id,
            as_background_task=as_background_task,
            wait_on_tasks=wait_on_tasks,
            task_delay_ms=task_delay_ms,
        )
    
        data = self._prepare_data(payload=payload)
    
        logging.debug(
            f"Making {verb} to {url} in workspace {self.config.workspace_handle}/{self.config.workspace_id}"
        )
        if verb == Verb.POST:
            if file is not None:
                files = self._prepare_multipart_data(data, file)
                resp = self._session.post(url, files=files, headers=headers, timeout=timeout_s)
            else:
                if isinstance(data, bytes):
                    resp = self._session.post(url, data=data, headers=headers, timeout=timeout_s)
                else:
                    resp = self._session.post(url, json=data, headers=headers, timeout=timeout_s)
        elif verb == Verb.GET:
            resp = self._session.get(url, params=data, headers=headers, timeout=timeout_s)
        else:
            raise Exception(f"Unsupported verb: {verb}")
    
        logging.debug(f"From {verb} to {url} got HTTP {resp.status_code}")
    
        if debug is True:
            logging.debug(f"Got response {resp}")
    
        response_data = self._response_data(resp, raw_response=raw_response)
    
        logging.debug(f"Response JSON {response_data}")
    
        task = None
        error = None
    
        if isinstance(response_data, dict):
            if "status" in response_data:
                try:
                    task = Task.parse_obj(
                        {**response_data["status"], "client": self, "expect": expect}
                    )
                    if "state" in response_data["status"]:
                        if response_data["status"]["state"] == "failed":
                            error = SteamshipError.from_dict(response_data["status"])
                            logging.warning(f"Client received error from server: {error}")
                except TypeError as e:
                    # There's an edge case here -- if a Steamship package returns the JSON dictionary
                    #
                    # { "status": "status string" }
                    #
                    # Then the above handler will attempt to parse it and throw... But we don't actually want to throw
                    # since we don't take a strong opinion on what the response type of a package endpoint ought to be.
                    # It *may* choose to conform to the SteamshipResponse<T> type, but it doesn't have to.
                    if not is_package_call:
                        raise e
    
                if task is not None and task.state == TaskState.failed:
                    error = task.as_error()
    
            if "data" in response_data:
                if expect is not None:
                    if issubclass(expect, SteamshipError):
                        data = expect.from_dict({**response_data["data"], "client": self})
                    elif issubclass(expect, BaseModel):
                        data = expect.parse_obj(
                            self._add_client_to_response(expect, response_data["data"])
                        )
                    else:
                        raise RuntimeError(f"obj of type {expect} does not have a from_dict method")
                else:
                    data = response_data["data"]
    
                if task:
                    task.output = data
            else:
                data = response_data
    
        else:
            data = response_data
    
        if error is not None:
            logging.warning(f"Client received error from server: {error}", exc_info=error)
>           raise error
E           steamship.base.error.SteamshipError: [ERROR - POST /determineOutputBlockTypes] Model may not be overridden in options

/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/steamship/base/client.py:537: SteamshipError
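
A note on the failure mode: each SteamshipError in this run is produced by the envelope handling visible in the call() listing above — the task/status response carries a "status" block, a state of "failed" is converted into a SteamshipError, and the error is raised after logging. A minimal stand-in sketch of that unwrapping (not the SDK's own code; the "statusMessage" field name is an assumption):

# Stand-in for the envelope handling in call() above, not the Steamship SDK
# itself. Responses look like {"data": ..., "status": {...}}; a status with
# state == "failed" becomes a raised error, otherwise "data" is returned.

class StandInSteamshipError(Exception):
    """Stand-in for steamship.base.error.SteamshipError."""

def unwrap_response(response_data):
    if not isinstance(response_data, dict):
        return response_data
    status = response_data.get("status")
    if isinstance(status, dict) and status.get("state") == "failed":
        # In this run the message is "[ERROR - POST /determineOutputBlockTypes]
        # Model may not be overridden in options".
        raise StandInSteamshipError(status.get("statusMessage", "task failed"))
    return response_data.get("data", response_data)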

Check failure on line 33 in tests/chat_models/test_openai.py

test_openai.test_chat_openai_system_message

steamship.base.error.SteamshipError: [ERROR - POST /determineOutputBlockTypes] Model may not be overridden in options
Raw output
client = Steamship(config=Configuration(api_key=SecretStr('**********'), api_base=AnyHttpUrl('https://api.steamship.com/api/v1/...space_id='6F474A75-C9CD-4B6F-973D-FA6380AD0CD4', workspace_handle='needed-fog-x5j0s', profile='test', request_id=None))

    @pytest.mark.usefixtures("client")
    def test_chat_openai_system_message(client: Steamship) -> None:
        """Test ChatOpenAI wrapper with system message."""
        chat = ChatOpenAI(client=client, max_tokens=10)
        system_message = SystemMessage(content="You are to chat with the user.")
        human_message = HumanMessage(content="Hello")
>       response = chat([system_message, human_message])

tests/chat_models/test_openai.py:33: 
[traceback otherwise identical to test_chat_openai above; task_id='D4F2FA85-B640-4780-859E-146AF6A03C1A']

Check failure on line 43 in tests/chat_models/test_openai.py

test_openai.test_chat_openai_generate

steamship.base.error.SteamshipError: [ERROR - POST /determineOutputBlockTypes] Model may not be overridden in options
Raw output
client = Steamship(config=Configuration(api_key=SecretStr('**********'), api_base=AnyHttpUrl('https://api.steamship.com/api/v1/...pace_id='11767CA6-855D-40AC-B3D1-8D4254E533CB', workspace_handle='expert-bike-c6wd1', profile='test', request_id=None))

    @pytest.mark.usefixtures("client")
    def test_chat_openai_generate(client: Steamship) -> None:
        """Test ChatOpenAI wrapper with generate."""
        chat = ChatOpenAI(client=client, max_tokens=10, n=2)
        message = HumanMessage(content="Hello")
>       response = chat.generate([[message], [message]])

tests/chat_models/test_openai.py:43: 
[traceback otherwise identical to test_chat_openai above; task_id='9751DCA3-8B2A-40DF-A15C-E9D9C2291DC9']

Check failure on line 59 in tests/chat_models/test_openai.py

test_openai.test_chat_openai_multiple_completions

steamship.base.error.SteamshipError: [ERROR - POST /determineOutputBlockTypes] Model may not be overridden in options
Raw output
client = Steamship(config=Configuration(api_key=SecretStr('**********'), api_base=AnyHttpUrl('https://api.steamship.com/api/v1/...pace_id='4FDF07E3-3ED6-4221-9307-A4A1DEE845A6', workspace_handle='prompt-leaf-94b9g', profile='test', request_id=None))

    @pytest.mark.usefixtures("client")
    def test_chat_openai_multiple_completions(client: Steamship) -> None:
        """Test ChatOpenAI wrapper with multiple completions."""
        chat = ChatOpenAI(client=client, max_tokens=10, n=5)
        message = HumanMessage(content="Hello")
>       response = chat._generate([message])

tests/chat_models/test_openai.py:59: 
[traceback otherwise identical to test_chat_openai above; task_id='E29FA2A2-44F1-4F3E-ACC4-F2851193E973']

Check failure on line 72 in tests/chat_models/test_openai.py

test_openai.test_chat_openai_llm_output_contains_model_name

steamship.base.error.SteamshipError: [ERROR - POST /determineOutputBlockTypes] Model may not be overridden in options
Raw output
client = Steamship(config=Configuration(api_key=SecretStr('**********'), api_base=AnyHttpUrl('https://api.steamship.com/api/v1/...space_id='7E38BE95-2191-4F01-9775-C6E74DCE08CC', workspace_handle='rare-prize-bkizv', profile='test', request_id=None))

    @pytest.mark.usefixtures("client")
    def test_chat_openai_llm_output_contains_model_name(client: Steamship) -> None:
        """Test llm_output contains model_name."""
        chat = ChatOpenAI(client=client, max_tokens=10)
        message = HumanMessage(content="Hello")
>       llm_result = chat.generate([[message]])

tests/chat_models/test_openai.py:72: 
[traceback otherwise identical to test_chat_openai above; task_id='0688D975-A8C8-4360-8B04-FBE2BE7BF00C']

Check failure on line 82 in tests/chat_models/test_openai.py

test_openai.test_chat_openai_streaming_llm_output_contains_model_name

steamship.base.error.SteamshipError: [ERROR - POST /determineOutputBlockTypes] Model may not be overridden in options
Raw output
client = Steamship(config=Configuration(api_key=SecretStr('**********'), api_base=AnyHttpUrl('https://api.steamship.com/api/v1/...ce_id='95B5CF2F-26F1-4A5E-8D5D-F5D86BF8A3E5', workspace_handle='merry-tsunami-4dyaj', profile='test', request_id=None))

    @pytest.mark.usefixtures("client")
    def test_chat_openai_streaming_llm_output_contains_model_name(client: Steamship) -> None:
        """Test llm_output contains model_name."""
        chat = ChatOpenAI(client=client, max_tokens=10, streaming=True)
        message = HumanMessage(content="Hello")
>       llm_result = chat.generate([[message]])

tests/chat_models/test_openai.py:82: 
[traceback otherwise identical to test_chat_openai above; task_id='C861EBD9-FEED-45DD-997B-4394B224B26A']
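
All six chat-model failures above share one root cause: the generation task polled through task/status comes back failed with "Model may not be overridden in options", i.e. the server-side plugin rejects a model value passed in per-request options. The Task.wait() -> refresh() -> post("task/status") loop in the tracebacks behaves roughly like this stand-in sketch (stdlib only; the "succeeded" state name and "statusMessage" key are assumptions):

import time

class TaskFailed(Exception):
    """Stand-in for the SteamshipError raised when a polled task fails."""

def wait_for_task(post_task_status, task_id, retry_delay_s=1.0, timeout_s=60.0):
    # Sketch of Task.wait() -> Task.refresh() -> client.post("task/status", ...)
    # from the tracebacks above; not the actual steamship.base.tasks code.
    deadline = time.monotonic() + timeout_s
    while True:
        status = post_task_status(task_id)   # one refresh() round-trip
        state = status.get("state")
        if state == "succeeded":             # assumed terminal-success name
            return status.get("output")
        if state == "failed":
            # This is where "Model may not be overridden in options" surfaces.
            raise TaskFailed(status.get("statusMessage", "task failed"))
        if time.monotonic() >= deadline:
            raise TimeoutError(f"task {task_id} still {state} after {timeout_s}s")
        time.sleep(retry_delay_s)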

Check failure on line 21 in tests/llms/test_openai.py

test_openai.test_openai

AssertionError: assert 'Generation failed.' == 'Hello'
  - Hello
  + Generation failed.
Raw output
client = Steamship(config=Configuration(api_key=SecretStr('**********'), api_base=AnyHttpUrl('https://api.steamship.com/api/v1/...'F9AA0672-0916-4324-B114-9BE86C5315B8', workspace_handle='resilient-resonance-i9008', profile='test', request_id=None))

    @pytest.mark.usefixtures("client")
    def test_openai(client: Steamship):
        """Basic tests of the OpenAI plugin wrapper."""
        llm_under_test = OpenAI(client=client, temperature=0)
    
        # simple prompt
        prompt = "Please respond with a simple 'Hello'"
        generated = llm_under_test(prompt=prompt)
        assert len(generated) != 0
>       assert generated.strip() == "Hello"
E       AssertionError: assert 'Generation failed.' == 'Hello'
E         - Hello
E         + Generation failed.

tests/llms/test_openai.py:21: AssertionError
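
This failure and the two generation-count failures below (test_openai_batching, test_openai_multiple_completions) are downstream of the same server-side error: instead of raising, the OpenAI LLM wrapper returns the literal placeholder text "Generation failed.", so batched prompts collapse into a single generation. A hypothetical sketch of such a fallback (a guess at the behavior, not the actual steamship_langchain source):

def generate_with_fallback(run_generation, prompts):
    # Hypothetical, guessed from the assertions in this run; NOT the real
    # steamship_langchain implementation. A failed task yields one placeholder
    # generation in place of the per-prompt results, which explains both
    # generated.strip() == "Generation failed." and len(generations) == 1.
    try:
        return run_generation(prompts)   # expected: one result per prompt
    except Exception:
        return ["Generation failed."]    # collapses N results into one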

Check failure on line 54 in tests/llms/test_openai.py

test_openai.test_openai_batching

AssertionError: assert 1 == 30
 +  where 1 = len([[Generation(text='Generation failed.', generation_info=None)]])
 +    where [[Generation(text='Generation failed.', generation_info=None)]] = LLMResult(generations=[[Generation(text='Generation failed.', generation_info=None)]], llm_output={'token_usage': {}}, run=RunInfo(run_id=UUID('379db9b7-9ebc-4bd5-b37c-0fc95b0dd511'))).generations
Raw output
client = Steamship(config=Configuration(api_key=SecretStr('**********'), api_base=AnyHttpUrl('https://api.steamship.com/api/v1/...ace_id='516EEC6E-F36F-447D-A165-D724B92A54F5', workspace_handle='wooden-waves-kzhve', profile='test', request_id=None))

    @pytest.mark.usefixtures("client")
    def test_openai_batching(client: Steamship):
        """Basic tests of the OpenAI plugin wrapper batching behavior."""
    
        # single option generation
        llm_under_test = OpenAI(client=client, temperature=0)
    
        # batched prompts -- needs to exceed the max batch_size (of 20)
        prompts = ["Tell me a joke", "Tell me a poem"] * 15
        generated = llm_under_test.generate(prompts=prompts)
        assert len(generated.generations) != 0
>       assert len(generated.generations) == 30
E       AssertionError: assert 1 == 30
E        +  where 1 = len([[Generation(text='Generation failed.', generation_info=None)]])
E        +    where [[Generation(text='Generation failed.', generation_info=None)]] = LLMResult(generations=[[Generation(text='Generation failed.', generation_info=None)]], llm_output={'token_usage': {}}, run=RunInfo(run_id=UUID('379db9b7-9ebc-4bd5-b37c-0fc95b0dd511'))).generations

tests/llms/test_openai.py:54: AssertionError

Check failure on line 66 in tests/llms/test_openai.py

test_openai.test_openai_multiple_completions

AssertionError: assert 1 == 10
 +  where 1 = len([[Generation(text='Generation failed.', generation_info=None)]])
 +    where [[Generation(text='Generation failed.', generation_info=None)]] = LLMResult(generations=[[Generation(text='Generation failed.', generation_info=None)]], llm_output={'token_usage': {}}, run=RunInfo(run_id=UUID('7c011478-03a2-4ca4-973e-09d6cb444e75'))).generations
Raw output
client = Steamship(config=Configuration(api_key=SecretStr('**********'), api_base=AnyHttpUrl('https://api.steamship.com/api/v1/...pace_id='EB10AFD3-4146-417C-A14D-0F0A1B145856', workspace_handle='useful-moon-2tdt3', profile='test', request_id=None))

    @pytest.mark.usefixtures("client")
    def test_openai_multiple_completions(client: Steamship):
        """Basic tests of the OpenAI plugin wrapper number of completions behavior."""
    
        llm_under_test = OpenAI(client=client, temperature=0.8, n=3, best_of=3)
    
        prompts = ["Tell me a joke", "Tell me a poem"] * 5
        generated = llm_under_test.generate(prompts=prompts)
        assert len(generated.generations) != 0
>       assert len(generated.generations) == 10
E       AssertionError: assert 1 == 10
E        +  where 1 = len([[Generation(text='Generation failed.', generation_info=None)]])
E        +    where [[Generation(text='Generation failed.', generation_info=None)]] = LLMResult(generations=[[Generation(text='Generation failed.', generation_info=None)]], llm_output={'token_usage': {}}, run=RunInfo(run_id=UUID('7c011478-03a2-4ca4-973e-09d6cb444e75'))).generations

tests/llms/test_openai.py:66: AssertionError

Check failure on line 130 in tests/llms/test_openai.py

test_openai.test_openai_chat_llm

steamship.base.error.SteamshipError: [ERROR - POST /determineOutputBlockTypes] Model may not be overridden in options
Raw output
client = Steamship(config=Configuration(api_key=SecretStr('**********'), api_base=AnyHttpUrl('https://api.steamship.com/api/v1/...ce_id='DE6072FF-366A-43DA-A0AB-6066D1D7099D', workspace_handle='august-winter-17akq', profile='test', request_id=None))

    @pytest.mark.usefixtures("client")
    def test_openai_chat_llm(client: Steamship) -> None:
        """Test Chat version of the LLM"""
        llm = OpenAIChat(client=client)
>       llm_result = llm.generate(
            prompts=["Please print the words of the Pledge of Allegiance"], stop=["flag", "Flag"]
        )

tests/llms/test_openai.py:130: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langchain/llms/base.py:203: in generate
    raise e
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langchain/llms/base.py:199: in generate
    else self._generate(prompts, stop=stop, **kwargs)
src/steamship_langchain/llms/openai.py:285: in _generate
    generated_text = self._completion(messages=messages, **params)
src/steamship_langchain/llms/openai.py:280: in _completion
    generate_task.wait()
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/steamship/base/tasks.py:268: in wait
    self.refresh()
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/steamship/base/tasks.py:313: in refresh
    resp = self.client.post("task/status", payload=req, expect=self.expect)
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/steamship/base/client.py:579: in post
    return self.call(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Steamship(config=Configuration(api_key=SecretStr('**********'), api_base=AnyHttpUrl('https://api.steamship.com/api/v1/...ce_id='DE6072FF-366A-43DA-A0AB-6066D1D7099D', workspace_handle='august-winter-17akq', profile='test', request_id=None))
verb = <Verb.POST: 'POST'>, operation = 'task/status'
payload = TaskStatusRequest(task_id='28C95652-C910-4C9B-A66A-BB7439CB5EFD')
file = None
expect = <class 'steamship.data.operations.generator.GenerateResponse'>
debug = False, raw_response = False, is_package_call = False
package_owner = None, package_id = None, package_instance_id = None
as_background_task = False, wait_on_tasks = None, timeout_s = None
task_delay_ms = None

    def call(  # noqa: C901
        self,
        verb: Verb,
        operation: str,
        payload: Union[Request, dict, bytes] = None,
        file: Any = None,
        expect: Type[T] = None,
        debug: bool = False,
        raw_response: bool = False,
        is_package_call: bool = False,
        package_owner: str = None,
        package_id: str = None,
        package_instance_id: str = None,
        as_background_task: bool = False,
        wait_on_tasks: List[Union[str, Task]] = None,
        timeout_s: Optional[float] = None,
        task_delay_ms: Optional[int] = None,
    ) -> Union[
        Any, Task
    ]:  # TODO (enias): I would like to list all possible return types using interfaces instead of Any
        """Post to the Steamship API.
    
        All responses have the format::
    
        .. code-block:: json
    
           {
               "data": "<actual response>",
               "error": {"reason": "<message>"}
           } # noqa: RST203
    
        For the Python client we return the contents of the `data` field if present, and we raise an exception
        if the `error` field is filled in.
        """
        # TODO (enias): Review this codebase
        url = self._url(
            is_package_call=is_package_call,
            package_owner=package_owner,
            operation=operation,
        )
    
        headers = self._headers(
            is_package_call=is_package_call,
            package_owner=package_owner,
            package_id=package_id,
            package_instance_id=package_instance_id,
            as_background_task=as_background_task,
            wait_on_tasks=wait_on_tasks,
            task_delay_ms=task_delay_ms,
        )
    
        data = self._prepare_data(payload=payload)
    
        logging.debug(
            f"Making {verb} to {url} in workspace {self.config.workspace_handle}/{self.config.workspace_id}"
        )
        if verb == Verb.POST:
            if file is not None:
                files = self._prepare_multipart_data(data, file)
                resp = self._session.post(url, files=files, headers=headers, timeout=timeout_s)
            else:
                if isinstance(data, bytes):
                    resp = self._session.post(url, data=data, headers=headers, timeout=timeout_s)
                else:
                    resp = self._session.post(url, json=data, headers=headers, timeout=timeout_s)
        elif verb == Verb.GET:
            resp = self._session.get(url, params=data, headers=headers, timeout=timeout_s)
        else:
            raise Exception(f"Unsupported verb: {verb}")
    
        logging.debug(f"From {verb} to {url} got HTTP {resp.status_code}")
    
        if debug is True:
            logging.debug(f"Got response {resp}")
    
        response_data = self._response_data(resp, raw_response=raw_response)
    
        logging.debug(f"Response JSON {response_data}")
    
        task = None
        error = None
    
        if isinstance(response_data, dict):
            if "status" in response_data:
                try:
                    task = Task.parse_obj(
                        {**response_data["status"], "client": self, "expect": expect}
                    )
                    if "state" in response_data["status"]:
                        if response_data["status"]["state"] == "failed":
                            error = SteamshipError.from_dict(response_data["status"])
                            logging.warning(f"Client received error from server: {error}")
                except TypeError as e:
                    # There's an edge case here -- if a Steamship package returns the JSON dictionary
                    #
                    # { "status": "status string" }
                    #
                    # Then the above handler will attempt to parse it and throw... But we don't actually want to throw
                    # since we don't take a strong opinion on what the response type of a package endpoint ought to be.
                    # It *may* choose to conform to the SteamshipResponse<T> type, but it doesn't have to.
                    if not is_package_call:
                        raise e
    
                if task is not None and task.state == TaskState.failed:
                    error = task.as_error()
    
            if "data" in response_data:
                if expect is not None:
                    if issubclass(expect, SteamshipError):
                        data = expect.from_dict({**response_data["data"], "client": self})
                    elif issubclass(expect, BaseModel):
                        data = expect.parse_obj(
                            self._add_client_to_response(expect, response_data["data"])
                        )
                    else:
                        raise RuntimeError(f"obj of type {expect} does not have a from_dict method")
                else:
                    data = response_data["data"]
    
                if task:
                    task.output = data
            else:
                data = response_data
    
        else:
            data = response_data
    
        if error is not None:
            logging.warning(f"Client received error from server: {error}", exc_info=error)
>           raise error
E           steamship.base.error.SteamshipError: [ERROR - POST /determineOutputBlockTypes] Model may not be overridden in options

/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/steamship/base/client.py:537: SteamshipError
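
Editor's note: the docstring in the `call` listing above spells out the envelope every Steamship response follows ({"data": ..., "error": {"reason": ...}}). A minimal sketch of client-side handling under that contract -- the function and exception names below are hypothetical stand-ins, not the real client code:

    import json

    class EnvelopeError(Exception):  # hypothetical stand-in for SteamshipError
        pass

    def unwrap_envelope(raw: str):
        """Return the `data` field of the envelope; raise if `error` is
        filled in, mirroring the behavior the docstring above describes."""
        body = json.loads(raw)
        error = body.get("error")
        if error:
            raise EnvelopeError(error.get("reason", "unknown error"))
        return body.get("data")

    # The failing calls in this run would surface like this:
    failing = '{"error": {"reason": "Model may not be overridden in options"}}'
    try:
        unwrap_envelope(failing)
    except EnvelopeError as e:
        print(e)  # Model may not be overridden in options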

Check failure on line 152 in tests/llms/test_openai.py

github-actions / Production Test Results

test_openai.test_openai_chat_llm_with_prefixed_messages

steamship.base.error.SteamshipError: [ERROR - POST /determineOutputBlockTypes] Model may not be overridden in options
Raw output
client = Steamship(config=Configuration(api_key=SecretStr('**********'), api_base=AnyHttpUrl('https://api.steamship.com/api/v1/...pace_id='D4DB3BD7-A459-4C64-88BD-49117EFE2012', workspace_handle='social-bath-2xi4k', profile='test', request_id=None))

    @pytest.mark.usefixtures("client")
    def test_openai_chat_llm_with_prefixed_messages(client: Steamship) -> None:
        """Test Chat version of the LLM"""
        messages = [
            {
                "role": "system",
                "content": "You are EchoGPT. For every prompt you receive, you reply with the exact same text.",
            },
            {"role": "user", "content": "This is a test."},
            {"role": "assistant", "content": "This is a test."},
        ]
        llm = OpenAIChat(client=client, prefix_messages=messages)
>       llm_result = llm.generate(prompts=["What is the meaning of life?"])

tests/llms/test_openai.py:152: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langchain/llms/base.py:203: in generate
    raise e
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langchain/llms/base.py:199: in generate
    else self._generate(prompts, stop=stop, **kwargs)
src/steamship_langchain/llms/openai.py:285: in _generate
    generated_text = self._completion(messages=messages, **params)
src/steamship_langchain/llms/openai.py:280: in _completion
    generate_task.wait()
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/steamship/base/tasks.py:268: in wait
    self.refresh()
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/steamship/base/tasks.py:313: in refresh
    resp = self.client.post("task/status", payload=req, expect=self.expect)
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/steamship/base/client.py:579: in post
    return self.call(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Steamship(config=Configuration(api_key=SecretStr('**********'), api_base=AnyHttpUrl('https://api.steamship.com/api/v1/...pace_id='D4DB3BD7-A459-4C64-88BD-49117EFE2012', workspace_handle='social-bath-2xi4k', profile='test', request_id=None))
verb = <Verb.POST: 'POST'>, operation = 'task/status'
payload = TaskStatusRequest(task_id='47347B4D-D6BE-4CF0-86F1-C5897D7B14DD')
file = None
expect = <class 'steamship.data.operations.generator.GenerateResponse'>
debug = False, raw_response = False, is_package_call = False
package_owner = None, package_id = None, package_instance_id = None
as_background_task = False, wait_on_tasks = None, timeout_s = None
task_delay_ms = None

    def call(  # noqa: C901
        ...  # [body elided -- verbatim duplicate of the full `call` listing in the test_openai_chat_llm failure above]
>           raise error
E           steamship.base.error.SteamshipError: [ERROR - POST /determineOutputBlockTypes] Model may not be overridden in options

/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/steamship/base/client.py:537: SteamshipError
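
Editor's note on prefix_messages: the chat wrappers prepend these role-tagged dicts to each prompt before sending, so the model sees the system/user/assistant preamble followed by the prompt as a final user turn. A rough sketch of that composition (build_chat_messages is a hypothetical helper, not the wrapper's actual function):

    from typing import Dict, List

    def build_chat_messages(prefix_messages: List[Dict[str, str]],
                            prompt: str) -> List[Dict[str, str]]:
        # Prefix messages first, then the prompt as a final user turn.
        return [*prefix_messages, {"role": "user", "content": prompt}]

    prefix = [
        {"role": "system",
         "content": "You are EchoGPT. For every prompt you receive, "
                    "you reply with the exact same text."},
        {"role": "user", "content": "This is a test."},
        {"role": "assistant", "content": "This is a test."},
    ]
    messages = build_chat_messages(prefix, "What is the meaning of life?")
    assert len(messages) == 4
    assert messages[-1]["role"] == "user"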

Check failure on line 172 in tests/llms/test_openai.py

github-actions / Production Test Results

test_openai.test_openai_llm_with_chat_model_init

steamship.base.error.SteamshipError: [ERROR - POST /determineOutputBlockTypes] Model may not be overridden in options
Raw output
client = Steamship(config=Configuration(api_key=SecretStr('**********'), api_base=AnyHttpUrl('https://api.steamship.com/api/v1/...ace_id='064F84CB-80B5-46F7-BA29-41EE131B71B3', workspace_handle='wanted-coast-fi97b', profile='test', request_id=None))

    @pytest.mark.usefixtures("client")
    def test_openai_llm_with_chat_model_init(client: Steamship) -> None:
        """Test Chat version of the LLM, with old init style"""
        messages = [
            {
                "role": "system",
                "content": "You are EchoGPT. For every prompt you receive, you reply with the exact same text.",
            },
            {"role": "user", "content": "This is a test."},
            {"role": "assistant", "content": "This is a test."},
        ]
        llm = OpenAI(client=client, prefix_messages=messages, model_name="gpt-4")
>       llm_result = llm.generate(prompts=["What is the meaning of life?"])

tests/llms/test_openai.py:172: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langchain/llms/base.py:203: in generate
    raise e
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langchain/llms/base.py:199: in generate
    else self._generate(prompts, stop=stop, **kwargs)
src/steamship_langchain/llms/openai.py:285: in _generate
    generated_text = self._completion(messages=messages, **params)
src/steamship_langchain/llms/openai.py:280: in _completion
    generate_task.wait()
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/steamship/base/tasks.py:268: in wait
    self.refresh()
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/steamship/base/tasks.py:313: in refresh
    resp = self.client.post("task/status", payload=req, expect=self.expect)
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/steamship/base/client.py:579: in post
    return self.call(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Steamship(config=Configuration(api_key=SecretStr('**********'), api_base=AnyHttpUrl('https://api.steamship.com/api/v1/...ace_id='064F84CB-80B5-46F7-BA29-41EE131B71B3', workspace_handle='wanted-coast-fi97b', profile='test', request_id=None))
verb = <Verb.POST: 'POST'>, operation = 'task/status'
payload = TaskStatusRequest(task_id='0F0766B1-BDB6-459B-B692-D7FC01BACC33')
file = None
expect = <class 'steamship.data.operations.generator.GenerateResponse'>
debug = False, raw_response = False, is_package_call = False
package_owner = None, package_id = None, package_instance_id = None
as_background_task = False, wait_on_tasks = None, timeout_s = None
task_delay_ms = None

    def call(  # noqa: C901
        ...  # [body elided -- verbatim duplicate of the full `call` listing in the test_openai_chat_llm failure above]
>           raise error
E           steamship.base.error.SteamshipError: [ERROR - POST /determineOutputBlockTypes] Model may not be overridden in options

/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/steamship/base/client.py:537: SteamshipError
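
Editor's note: every chat-model failure in this run dies on the same server-side check, which suggests the wrapper is re-sending `model` inside per-request options while the hosted plugin only accepts a model fixed at instance creation. A hypothetical illustration of that distinction -- neither function below is real Steamship API:

    def create_generator(*, model: str) -> dict:
        """Model is fixed at instance-creation time (allowed)."""
        return {"config": {"model": model}}

    def generate(generator: dict, prompt: str, options: dict) -> str:
        """Per-request options must not re-specify the model."""
        if "model" in options:
            raise ValueError("Model may not be overridden in options")
        return f"<generation for {prompt!r} with {generator['config']['model']}>"

    gen = create_generator(model="gpt-4")
    print(generate(gen, "What is the meaning of life?", options={}))  # OK
    try:
        generate(gen, "What is the meaning of life?", options={"model": "gpt-4"})
    except ValueError as e:
        print(e)  # the error these tests hit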

Check failure on line 193 in tests/llms/test_openai.py

github-actions / Production Test Results

test_openai.test_openai_large_context

steamship.base.error.SteamshipError: [ERROR - POST /determineOutputBlockTypes] Model may not be overridden in options
Raw output
client = Steamship(config=Configuration(api_key=SecretStr('**********'), api_base=AnyHttpUrl('https://api.steamship.com/api/v1/...ace_id='7477A0F6-97B8-4CFC-9C86-E4E0FD0E78F3', workspace_handle='real-volcano-duzgq', profile='test', request_id=None))

    @pytest.mark.usefixtures("client")
    def test_openai_large_context(client: Steamship):
        """Basic tests of the OpenAIChat plugin wrapper for large context models."""
    
        llm_under_test = OpenAIChat(client=client, model_name="gpt-3.5-turbo-16k", temperature=0.8)
    
        long_prompt = (
            'Complete the following short story. The child screamed "'
            + "AHHHHH" * 5000
            + '" when they saw the'
        )
    
        prompts = [long_prompt]
>       generated = llm_under_test.generate(prompts=prompts)

tests/llms/test_openai.py:193: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langchain/llms/base.py:203: in generate
    raise e
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langchain/llms/base.py:199: in generate
    else self._generate(prompts, stop=stop, **kwargs)
src/steamship_langchain/llms/openai.py:285: in _generate
    generated_text = self._completion(messages=messages, **params)
src/steamship_langchain/llms/openai.py:280: in _completion
    generate_task.wait()
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/steamship/base/tasks.py:268: in wait
    self.refresh()
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/steamship/base/tasks.py:313: in refresh
    resp = self.client.post("task/status", payload=req, expect=self.expect)
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/steamship/base/client.py:579: in post
    return self.call(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Steamship(config=Configuration(api_key=SecretStr('**********'), api_base=AnyHttpUrl('https://api.steamship.com/api/v1/...ace_id='7477A0F6-97B8-4CFC-9C86-E4E0FD0E78F3', workspace_handle='real-volcano-duzgq', profile='test', request_id=None))
verb = <Verb.POST: 'POST'>, operation = 'task/status'
payload = TaskStatusRequest(task_id='DF3C279A-2E88-4951-8748-EC49C78C486F')
file = None
expect = <class 'steamship.data.operations.generator.GenerateResponse'>
debug = False, raw_response = False, is_package_call = False
package_owner = None, package_id = None, package_instance_id = None
as_background_task = False, wait_on_tasks = None, timeout_s = None
task_delay_ms = None

    def call(  # noqa: C901
        ...  # [body elided -- verbatim duplicate of the full `call` listing in the test_openai_chat_llm failure above]
>           raise error
E           steamship.base.error.SteamshipError: [ERROR - POST /determineOutputBlockTypes] Model may not be overridden in options

/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/steamship/base/client.py:537: SteamshipError
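
Editor's note on the prompt size: "AHHHHH" * 5000 produces a roughly 30,000-character prompt, on the order of 7-8k tokens by the common 4-characters-per-token heuristic -- past the ~4k window of base gpt-3.5-turbo, which is why the test targets gpt-3.5-turbo-16k. A quick back-of-envelope check (the heuristic is approximate; highly repetitive text may tokenize more densely):

    long_prompt = (
        'Complete the following short story. The child screamed "'
        + "AHHHHH" * 5000
        + '" when they saw the'
    )
    print(len(long_prompt))        # ~30,000 characters
    print(len(long_prompt) // 4)   # ~7,500 tokens by the chars/4 heuristic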