Commit 03093b7 (parent: 760fc3b)

Fix "No call message found" bug with gpt-oss-120b when using the Responses API

File tree: 1 file changed (+7 / -7 lines)

  • libs/partners/openai/langchain_openai/chat_models

libs/partners/openai/langchain_openai/chat_models/base.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -3987,7 +3987,7 @@ def _construct_lc_result_from_responses_api(
                     {"type": "refusal", "refusal": content.refusal, "id": output.id}
                 )
         elif output.type == "function_call":
-            content_blocks.append(output.model_dump(exclude_none=True, mode="json"))
+            content_blocks.append(output.model_dump(exclude_none=False, mode="json"))
             try:
                 args = json.loads(output.arguments, strict=False)
                 error = None
@@ -4012,7 +4012,7 @@ def _construct_lc_result_from_responses_api(
             }
             invalid_tool_calls.append(tool_call)
         elif output.type == "custom_tool_call":
-            content_blocks.append(output.model_dump(exclude_none=True, mode="json"))
+            content_blocks.append(output.model_dump(exclude_none=False, mode="json"))
             tool_call = {
                 "type": "tool_call",
                 "name": output.name,
@@ -4031,7 +4031,7 @@ def _construct_lc_result_from_responses_api(
             "mcp_approval_request",
             "image_generation_call",
         ):
-            content_blocks.append(output.model_dump(exclude_none=True, mode="json"))
+            content_blocks.append(output.model_dump(exclude_none=False, mode="json"))

     # Workaround for parsing structured output in the streaming case.
     # from openai import OpenAI
@@ -4160,7 +4160,7 @@ def _advance(output_idx: int, sub_idx: int | None = None) -> None:
             # Appears to be a breaking change in openai==1.82.0
             annotation = chunk.annotation
         else:
-            annotation = chunk.annotation.model_dump(exclude_none=True, mode="json")
+            annotation = chunk.annotation.model_dump(exclude_none=False, mode="json")

         content.append(
             {
@@ -4232,15 +4232,15 @@ def _advance(output_idx: int, sub_idx: int | None = None) -> None:
             "image_generation_call",
         ):
             _advance(chunk.output_index)
-            tool_output = chunk.item.model_dump(exclude_none=True, mode="json")
+            tool_output = chunk.item.model_dump(exclude_none=False, mode="json")
             tool_output["index"] = current_index
             content.append(tool_output)
         elif (
             chunk.type == "response.output_item.done"
             and chunk.item.type == "custom_tool_call"
         ):
             _advance(chunk.output_index)
-            tool_output = chunk.item.model_dump(exclude_none=True, mode="json")
+            tool_output = chunk.item.model_dump(exclude_none=False, mode="json")
             tool_output["index"] = current_index
             content.append(tool_output)
             tool_call_chunks.append(
@@ -4265,7 +4265,7 @@ def _advance(output_idx: int, sub_idx: int | None = None) -> None:
         elif chunk.type == "response.output_item.added" and chunk.item.type == "reasoning":
             _advance(chunk.output_index)
             current_sub_index = 0
-            reasoning = chunk.item.model_dump(exclude_none=True, mode="json")
+            reasoning = chunk.item.model_dump(exclude_none=False, mode="json")
             reasoning["index"] = current_index
             content.append(reasoning)
         elif chunk.type == "response.reasoning_summary_part.added":

0 commit comments

Comments
 (0)