From 3b3502bc9c2e750e87fe5bab12af9b34d510df81 Mon Sep 17 00:00:00 2001
From: Andrew Heard
Date: Thu, 16 Oct 2025 21:06:03 -0400
Subject: [PATCH 1/3] [Firebase AI] Fix Google AI `useLimitedUseAppCheckTokens` config

---
 .../Tests/Integration/LiveSessionTests.swift | 31 +++++++------------
 .../Tests/Utilities/InstanceConfig.swift     |  3 +-
 2 files changed, 13 insertions(+), 21 deletions(-)

diff --git a/FirebaseAI/Tests/TestApp/Tests/Integration/LiveSessionTests.swift b/FirebaseAI/Tests/TestApp/Tests/Integration/LiveSessionTests.swift
index cb341697043..338a80ccb97 100644
--- a/FirebaseAI/Tests/TestApp/Tests/Integration/LiveSessionTests.swift
+++ b/FirebaseAI/Tests/TestApp/Tests/Integration/LiveSessionTests.swift
@@ -134,10 +134,9 @@ struct LiveSessionTests {
 
     let session = try await model.connect()
 
-    guard let audioFile = NSDataAsset(name: "hello") else {
-      Issue.record("Missing audio file 'hello.wav' in Assets")
-      return
-    }
+    let audioFile = try #require(
+      NSDataAsset(name: "hello"), "Missing audio file 'hello.wav' in Assets"
+    )
     await session.sendAudioRealtime(audioFile.data)
     // The model can't infer that we're done speaking until we send null bytes
     await session.sendAudioRealtime(Data(repeating: 0, count: audioFile.data.count))
@@ -163,10 +162,9 @@ struct LiveSessionTests {
 
     let session = try await model.connect()
 
-    guard let audioFile = NSDataAsset(name: "hello") else {
-      Issue.record("Missing audio file 'hello.wav' in Assets")
-      return
-    }
+    let audioFile = try #require(
+      NSDataAsset(name: "hello"), "Missing audio file 'hello.wav' in Assets"
+    )
     await session.sendAudioRealtime(audioFile.data)
     await session.sendAudioRealtime(Data(repeating: 0, count: audioFile.data.count))
 
@@ -230,7 +228,7 @@ struct LiveSessionTests {
   }
 
   @Test(arguments: arguments.filter {
-    // TODO: (b/450982184) Remove when vertex adds support
+    // TODO: (b/450982184) Remove when Vertex AI adds support for Function IDs and Cancellation
     switch $0.0.apiConfig.service {
     case .googleAI: true
@@ -240,12 +238,6 @@ struct LiveSessionTests {
   })
   func realtime_functionCalling_cancellation(_ config: InstanceConfig,
                                               modelName: String) async throws {
-    // TODO: (b/450982184) Remove when vertex adds support
-    guard case .googleAI = config.apiConfig.service else {
-      Issue.record("Vertex does not currently support function ids or function cancellation.")
-      return
-    }
-
     let model = FirebaseAI.componentInstance(config).liveModel(
       modelName: modelName,
       generationConfig: textConfig,
@@ -288,14 +280,13 @@ struct LiveSessionTests {
 
     let session = try await model.connect()
 
-    guard let audioFile = NSDataAsset(name: "hello") else {
-      Issue.record("Missing audio file 'hello.wav' in Assets")
-      return
-    }
+    let audioFile = try #require(
+      NSDataAsset(name: "hello"), "Missing audio file 'hello.wav' in Assets"
+    )
     await session.sendAudioRealtime(audioFile.data)
     await session.sendAudioRealtime(Data(repeating: 0, count: audioFile.data.count))
 
-    // wait a second to allow the model to start generating (and cuase a proper interruption)
+    // Wait a second to allow the model to start generating (and cause a proper interruption)
     try await Task.sleep(nanoseconds: oneSecondInNanoseconds)
     await session.sendAudioRealtime(audioFile.data)
     await session.sendAudioRealtime(Data(repeating: 0, count: audioFile.data.count))
diff --git a/FirebaseAI/Tests/TestApp/Tests/Utilities/InstanceConfig.swift b/FirebaseAI/Tests/TestApp/Tests/Utilities/InstanceConfig.swift
index 12b8f4da70b..1c515957a36 100644
--- a/FirebaseAI/Tests/TestApp/Tests/Utilities/InstanceConfig.swift
+++ b/FirebaseAI/Tests/TestApp/Tests/Utilities/InstanceConfig.swift
@@ -56,6 +56,7 @@ struct InstanceConfig: Equatable, Encodable {
     apiConfig: APIConfig(service: .googleAI(endpoint: .firebaseProxyProd), version: .v1beta)
   )
   static let googleAI_v1beta_appCheckLimitedUse = InstanceConfig(
+    useLimitedUseAppCheckTokens: true,
     apiConfig: APIConfig(service: .googleAI(endpoint: .firebaseProxyProd), version: .v1beta)
   )
   static let googleAI_v1beta_staging = InstanceConfig(
@@ -164,7 +165,7 @@ extension InstanceConfig: CustomTestStringConvertible {
     }
     let locationSuffix: String
     if case let .vertexAI(_, location: location) = apiConfig.service {
-      locationSuffix = location
+      locationSuffix = " - (\(location))"
     } else {
      locationSuffix = ""
     }

From ca797cf488c8c530ec4c199fcebba784f4bf6679 Mon Sep 17 00:00:00 2001
From: Andrew Heard
Date: Tue, 21 Oct 2025 18:22:24 -0400
Subject: [PATCH 2/3] Fix formatting and merge conflict

---
 .../Tests/Integration/LiveSessionTests.swift | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/FirebaseAI/Tests/TestApp/Tests/Integration/LiveSessionTests.swift b/FirebaseAI/Tests/TestApp/Tests/Integration/LiveSessionTests.swift
index ee9d88e1b47..315939ecab8 100644
--- a/FirebaseAI/Tests/TestApp/Tests/Integration/LiveSessionTests.swift
+++ b/FirebaseAI/Tests/TestApp/Tests/Integration/LiveSessionTests.swift
@@ -61,20 +61,25 @@ struct LiveSessionTests {
   static let yesOrNo = ModelContent(
     role: "system",
     parts: """
-    You can only respond with "yes" or "no".
+      You can only respond with "yes" or "no".
     """.trimmingCharacters(in: .whitespacesAndNewlines)
   )
 
   static let helloGoodbye = ModelContent(
     role: "system",
     parts: """
-    When you hear "Hello" say "Goodbye". If you hear anything else, say "The audio file is broken".
+      When you hear "Hello" say "Goodbye". If you hear anything else, say "The audio file is \
+      broken".
     """.trimmingCharacters(in: .whitespacesAndNewlines)
   )
 
   static let lastNames = ModelContent(
     role: "system",
-    parts: "When you receive a message, if the message is a single word, assume it's the first name of a person, and call the getLastName tool to get the last name of said person. Only respond with the last name."
+    parts: """
+      When you receive a message, if the message is a single word, assume it's the first name of a \
+      person, and call the getLastName tool to get the last name of said person. Only respond with \
+      the last name.
+      """
   )
 
   static let animalInVideo = ModelContent(
@@ -333,16 +338,12 @@ struct LiveSessionTests {
       NSDataAsset(name: "hello"), "Missing audio file 'hello.wav' in Assets"
     )
 
-    for try await content in session.responsesOf(LiveServerContent.self) {
-      if content.wasInterrupted {
-        break
-      }
     try await retry(times: 3, delayInSeconds: 2.0) {
       let session = try await model.connect()
 
       await session.sendAudioRealtime(audioFile.data)
       await session.sendAudioRealtime(Data(repeating: 0, count: audioFile.data.count))
-      // wait a second to allow the model to start generating (and cuase a proper interruption)
+      // Wait a second to allow the model to start generating (and cause a proper interruption)
       try await Task.sleep(nanoseconds: oneSecondInNanoseconds)
       await session.sendAudioRealtime(audioFile.data)
       await session.sendAudioRealtime(Data(repeating: 0, count: audioFile.data.count))

From 5615fd7fee820f8549d630daaea1d91bd1b94a5b Mon Sep 17 00:00:00 2001
From: Andrew Heard
Date: Tue, 21 Oct 2025 18:33:15 -0400
Subject: [PATCH 3/3] Trim whitespace characters

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
---
 .../Tests/TestApp/Tests/Integration/LiveSessionTests.swift | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/FirebaseAI/Tests/TestApp/Tests/Integration/LiveSessionTests.swift b/FirebaseAI/Tests/TestApp/Tests/Integration/LiveSessionTests.swift
index 315939ecab8..6d77f87c0fe 100644
--- a/FirebaseAI/Tests/TestApp/Tests/Integration/LiveSessionTests.swift
+++ b/FirebaseAI/Tests/TestApp/Tests/Integration/LiveSessionTests.swift
@@ -79,7 +79,7 @@ struct LiveSessionTests {
       When you receive a message, if the message is a single word, assume it's the first name of a \
       person, and call the getLastName tool to get the last name of said person. Only respond with \
       the last name.
-      """
+      """.trimmingCharacters(in: .whitespacesAndNewlines)
   )
 
   static let animalInVideo = ModelContent(