42 changes: 19 additions & 23 deletions FirebaseAI/Tests/TestApp/Tests/Integration/LiveSessionTests.swift
@@ -61,20 +61,25 @@ struct LiveSessionTests {
static let yesOrNo = ModelContent(
role: "system",
parts: """
You can only respond with "yes" or "no".
You can only respond with "yes" or "no".
""".trimmingCharacters(in: .whitespacesAndNewlines)
)

static let helloGoodbye = ModelContent(
role: "system",
parts: """
When you hear "Hello" say "Goodbye". If you hear anything else, say "The audio file is broken".
When you hear "Hello" say "Goodbye". If you hear anything else, say "The audio file is \
broken".
""".trimmingCharacters(in: .whitespacesAndNewlines)
)

static let lastNames = ModelContent(
role: "system",
parts: "When you receive a message, if the message is a single word, assume it's the first name of a person, and call the getLastName tool to get the last name of said person. Only respond with the last name."
parts: """
When you receive a message, if the message is a single word, assume it's the first name of a \
person, and call the getLastName tool to get the last name of said person. Only respond with \
the last name.
""".trimmingCharacters(in: .whitespacesAndNewlines)
)
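(Aside, not part of the diff: the pattern these system instructions now share is a multiline string literal, using a trailing `\` to continue a long logical line without embedding a newline, and a defensive trim at the ends. A minimal standalone sketch, with a made-up instruction:)

```swift
import Foundation

// A long instruction split across source lines; the trailing backslash
// joins the pieces into one logical line with no embedded newline.
let instruction = """
  When you receive a message, respond with a single word. Keep the \
  answer short.
  """.trimmingCharacters(in: .whitespacesAndNewlines)

print(instruction)
// When you receive a message, respond with a single word. Keep the answer short.
```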

static let animalInVideo = ModelContent(
@@ -142,10 +147,9 @@ struct LiveSessionTests {

let session = try await model.connect()

guard let audioFile = NSDataAsset(name: "hello") else {
Issue.record("Missing audio file 'hello.wav' in Assets")
return
}
let audioFile = try #require(
NSDataAsset(name: "hello"), "Missing audio file 'hello.wav' in Assets"
)
await session.sendAudioRealtime(audioFile.data)
// The model can't infer that we're done speaking until we send null bytes
await session.sendAudioRealtime(Data(repeating: 0, count: audioFile.data.count))
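(Aside, not part of the diff: `try #require(_:_:)` is Swift Testing's optional-unwrapping macro; when the value is nil it records a failure with the given message and throws, which is what lets the new code drop the guard/`Issue.record`/return boilerplate. A hedged, self-contained sketch, with a hypothetical `loadFixture` standing in for `NSDataAsset(name:)`:)

```swift
import Foundation
import Testing

// Hypothetical lookup standing in for NSDataAsset(name:).
func loadFixture(named name: String) -> Data? {
  name == "hello" ? Data([0x68, 0x69]) : nil
}

@Test func unwrapsFixtureOrFails() throws {
  // On nil, #require records the failure message and throws, ending
  // the test early; on success it yields the unwrapped value.
  let fixture = try #require(loadFixture(named: "hello"),
                             "Missing fixture 'hello'")
  #expect(!fixture.isEmpty)
}
```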
@@ -171,10 +175,9 @@ struct LiveSessionTests {

let session = try await model.connect()

guard let audioFile = NSDataAsset(name: "hello") else {
Issue.record("Missing audio file 'hello.wav' in Assets")
return
}
let audioFile = try #require(
NSDataAsset(name: "hello"), "Missing audio file 'hello.wav' in Assets"
)
await session.sendAudioRealtime(audioFile.data)
await session.sendAudioRealtime(Data(repeating: 0, count: audioFile.data.count))

@@ -281,7 +284,7 @@ struct LiveSessionTests {
}

@Test(arguments: arguments.filter {
// TODO: (b/450982184) Remove when vertex adds support
// TODO: (b/450982184) Remove when Vertex AI adds support for Function IDs and Cancellation
switch $0.0.apiConfig.service {
case .googleAI:
true
@@ -291,12 +294,6 @@
})
func realtime_functionCalling_cancellation(_ config: InstanceConfig,
modelName: String) async throws {
// TODO: (b/450982184) Remove when vertex adds support
guard case .googleAI = config.apiConfig.service else {
Issue.record("Vertex does not currently support function ids or function cancellation.")
return
}

let model = FirebaseAI.componentInstance(config).liveModel(
modelName: modelName,
generationConfig: textConfig,
@@ -337,17 +334,16 @@ struct LiveSessionTests {
generationConfig: audioConfig
)

guard let audioFile = NSDataAsset(name: "hello") else {
Issue.record("Missing audio file 'hello.wav' in Assets")
return
}
let audioFile = try #require(
NSDataAsset(name: "hello"), "Missing audio file 'hello.wav' in Assets"
)

try await retry(times: 3, delayInSeconds: 2.0) {
let session = try await model.connect()
await session.sendAudioRealtime(audioFile.data)
await session.sendAudioRealtime(Data(repeating: 0, count: audioFile.data.count))

// wait a second to allow the model to start generating (and cuase a proper interruption)
// Wait a second to allow the model to start generating (and cause a proper interruption)
try await Task.sleep(nanoseconds: oneSecondInNanoseconds)
await session.sendAudioRealtime(audioFile.data)
await session.sendAudioRealtime(Data(repeating: 0, count: audioFile.data.count))
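(Aside, not part of the diff: the test above wraps the session setup in a `retry(times:delayInSeconds:)` helper. The test app's actual implementation isn't shown in this diff; a plausible sketch under that assumption:)

```swift
import Foundation

// Hedged sketch of a retry helper: run `body` up to `times` times,
// sleeping between attempts, and rethrow the last error on exhaustion.
func retry(times: Int, delayInSeconds: TimeInterval,
           _ body: () async throws -> Void) async throws {
  precondition(times >= 1, "times must be at least 1")
  var lastError: Error?
  for attempt in 1 ... times {
    do {
      try await body()
      return
    } catch {
      lastError = error
      if attempt < times {
        try await Task.sleep(nanoseconds: UInt64(delayInSeconds * 1_000_000_000))
      }
    }
  }
  throw lastError! // only reachable if every attempt threw
}
```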
@@ -56,6 +56,7 @@ struct InstanceConfig: Equatable, Encodable {
apiConfig: APIConfig(service: .googleAI(endpoint: .firebaseProxyProd), version: .v1beta)
)
static let googleAI_v1beta_appCheckLimitedUse = InstanceConfig(
useLimitedUseAppCheckTokens: true,
apiConfig: APIConfig(service: .googleAI(endpoint: .firebaseProxyProd), version: .v1beta)
)
static let googleAI_v1beta_staging = InstanceConfig(
@@ -164,7 +165,7 @@ extension InstanceConfig: CustomTestStringConvertible {
}
let locationSuffix: String
if case let .vertexAI(_, location: location) = apiConfig.service {
locationSuffix = location
locationSuffix = " - (\(location))"
} else {
locationSuffix = ""
}