Skip to content

refactor: add support for the `outputs` key in inference request inputs #3405

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 15 additions & 2 deletions ts/torch_handler/request_envelope/kservev2.py
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,14 @@ def _from_json(self, body_list):
)
input_names.append(input["name"])
setattr(self.context, "input_names", input_names)

output_names = []
for index, output in enumerate(body_list[0].get("outputs", [])):
output_names.append(output["name"])
# TODO: Add parameters support
# parameters = output.get("parameters")
setattr(self.context, "output_names", output_names)

logger.debug("Bytes array is %s", body_list)
id = body_list[0].get("id")
if id and id.strip():
Expand Down Expand Up @@ -167,10 +175,15 @@ def _batch_to_json(self, data):
Splits batch output to json objects
"""
output = []
input_names = getattr(self.context, "input_names")

output_names = getattr(self.context, "output_names")
delattr(self.context, "output_names")
if len(output_names) == 0:
# Re-use input names in case no output is specified
output_names = getattr(self.context, "input_names")
delattr(self.context, "input_names")
for index, item in enumerate(data):
output.append(self._to_json(item, input_names[index]))
output.append(self._to_json(item, output_names[index]))
return output

def _to_json(self, data, input_name):
Expand Down