Skip to content

Commit d857683

Browse files
committed
feat(http_options): add dynamic HTTP options support via RunConfig
Enable per-request HTTP configuration (headers, timeout, retry_options) to be passed via RunConfig and propagated through the request pipeline to models.
1 parent 82fa10b commit d857683

File tree

6 files changed

+206
-53
lines changed

6 files changed

+206
-53
lines changed

src/google/adk/agents/run_config.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,6 @@
3030

3131
logger = logging.getLogger('google_adk.' + __name__)
3232

33-
3433
class StreamingMode(Enum):
3534
"""Streaming modes for agent execution.
3635
@@ -160,7 +159,6 @@ class StreamingMode(Enum):
160159
For bidirectional streaming, use runner.run_live() instead of run_async().
161160
"""
162161

163-
164162
class RunConfig(BaseModel):
165163
"""Configs for runtime behavior of agents.
166164
@@ -175,6 +173,9 @@ class RunConfig(BaseModel):
175173
speech_config: Optional[types.SpeechConfig] = None
176174
"""Speech configuration for the live agent."""
177175

176+
http_options: Optional[types.HttpOptions] = None
177+
"""HTTP options for the agent execution (e.g. custom headers)."""
178+
178179
response_modalities: Optional[list[str]] = None
179180
"""The output modalities. If not set, it's default to AUDIO."""
180181

src/google/adk/flows/llm_flows/base_llm_flow.py

Lines changed: 16 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,6 @@
6767
# Statistics configuration
6868
DEFAULT_ENABLE_CACHE_STATISTICS = False
6969

70-
7170
class BaseLlmFlow(ABC):
7271
"""A basic flow that calls the LLM in a loop until a final response is generated.
7372
@@ -483,6 +482,22 @@ async def _preprocess_async(
483482
f'Expected agent to be an LlmAgent, but got {type(agent)}'
484483
)
485484

485+
# Propagate http_options from RunConfig to LlmRequest as defaults.
486+
# Request-level settings (from callbacks/processors) take precedence.
487+
if (
488+
invocation_context.run_config
489+
and invocation_context.run_config.http_options
490+
):
491+
run_opts = invocation_context.run_config.http_options
492+
if not llm_request.config.http_options:
493+
# Deep-copy to avoid mutating the user's RunConfig across steps.
494+
llm_request.config.http_options = run_opts.model_copy(deep=True)
495+
elif run_opts.headers:
496+
# Merge headers: request-level headers win (use setdefault).
497+
if not llm_request.config.http_options.headers:
498+
llm_request.config.http_options.headers = {}
499+
for key, value in run_opts.headers.items():
500+
llm_request.config.http_options.headers.setdefault(key, value)
486501
# Runs processors.
487502
for processor in self.request_processors:
488503
async with Aclosing(

src/google/adk/flows/llm_flows/basic.py

Lines changed: 31 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,6 @@
1717
from __future__ import annotations
1818

1919
from typing import AsyncGenerator
20-
from typing import Generator
2120

2221
from google.genai import types
2322
from typing_extensions import override
@@ -28,7 +27,6 @@
2827
from ...utils.output_schema_utils import can_use_output_schema_with_tools
2928
from ._base_llm_processor import BaseLlmRequestProcessor
3029

31-
3230
class _BasicLlmRequestProcessor(BaseLlmRequestProcessor):
3331

3432
@override
@@ -38,11 +36,42 @@ async def run_async(
3836
agent = invocation_context.agent
3937
model = agent.canonical_model
4038
llm_request.model = model if isinstance(model, str) else model.model
39+
40+
# Preserve http_options propagated from RunConfig
41+
run_config_http_options = llm_request.config.http_options
42+
4143
llm_request.config = (
4244
agent.generate_content_config.model_copy(deep=True)
4345
if agent.generate_content_config
4446
else types.GenerateContentConfig()
4547
)
48+
49+
if run_config_http_options:
50+
# Merge RunConfig http_options back, overriding agent config
51+
if not llm_request.config.http_options:
52+
llm_request.config.http_options = run_config_http_options
53+
else:
54+
# Merge headers
55+
if run_config_http_options.headers:
56+
if not llm_request.config.http_options.headers:
57+
llm_request.config.http_options.headers = {}
58+
llm_request.config.http_options.headers.update(
59+
run_config_http_options.headers
60+
)
61+
62+
# Merge other http_options fields if present in RunConfig.
63+
# RunConfig values override agent defaults.
64+
# Note: base_url, api_version, base_url_resource_scope are intentionally
65+
# excluded as they are configuration-time settings, not request-time.
66+
for field in [
67+
'timeout',
68+
'retry_options',
69+
'extra_body',
70+
]:
71+
val = getattr(run_config_http_options, field, None)
72+
if val is not None:
73+
setattr(llm_request.config.http_options, field, val)
74+
4675
# Only set output_schema if no tools are specified. As of now, models don't
4776
# support output_schema and tools together. We have a workaround to support
4877
# both output_schema and tools at the same time. see
@@ -84,5 +113,4 @@ async def run_async(
84113
return
85114
yield # Generator requires yield statement in function body.
86115

87-
88116
request_processor = _BasicLlmRequestProcessor()

0 commit comments

Comments
 (0)