Commit b116159

Merge pull request #78 from ljoukov/functions
Add support for "functions" in OpenAI chat completion APIs.
2 parents 0641b9c + 2620356 commit b116159

File tree: 7 files changed, +396 −12 lines

Demo/App/APIProvidedView.swift

Lines changed: 3 additions & 1 deletion

@@ -42,7 +42,9 @@ struct APIProvidedView: View {
             miscStore: miscStore
         )
         .onChange(of: apiKey) { newApiKey in
-            chatStore.openAIClient = OpenAI(apiToken: newApiKey)
+            let client = OpenAI(apiToken: newApiKey)
+            chatStore.openAIClient = client
+            miscStore.openAIClient = client
         }
     }
 }

Demo/DemoChat/Sources/ChatStore.swift

Lines changed: 33 additions & 2 deletions

@@ -85,22 +85,53 @@ public final class ChatStore: ObservableObject {
            return
        }

+        let weatherFunction = ChatFunctionDeclaration(
+            name: "getWeatherData",
+            description: "Get the current weather in a given location",
+            parameters: .init(
+                type: .object,
+                properties: [
+                    "location": .init(type: .string, description: "The city and state, e.g. San Francisco, CA")
+                ],
+                required: ["location"]
+            )
+        )
+
+        let functions = [weatherFunction]
+
         let chatsStream: AsyncThrowingStream<ChatStreamResult, Error> = openAIClient.chatsStream(
             query: ChatQuery(
                 model: model,
                 messages: conversation.messages.map { message in
                     Chat(role: message.role, content: message.content)
-                }
+                },
+                functions: functions
             )
         )

+        var functionCallName = ""
+        var functionCallArguments = ""
         for try await partialChatResult in chatsStream {
             for choice in partialChatResult.choices {
                 let existingMessages = conversations[conversationIndex].messages
+                // Function calls are also streamed, so we need to accumulate.
+                if let functionCallDelta = choice.delta.functionCall {
+                    if let nameDelta = functionCallDelta.name {
+                        functionCallName += nameDelta
+                    }
+                    if let argumentsDelta = functionCallDelta.arguments {
+                        functionCallArguments += argumentsDelta
+                    }
+                }
+                var messageText = choice.delta.content ?? ""
+                if let finishReason = choice.finishReason,
+                   finishReason == "function_call" {
+                    messageText += "Function call: name=\(functionCallName) arguments=\(functionCallArguments)"
+                }
                 let message = Message(
                     id: partialChatResult.id,
                     role: choice.delta.role ?? .assistant,
-                    content: choice.delta.content ?? "",
+                    content: messageText,
                     createdAt: Date(timeIntervalSince1970: TimeInterval(partialChatResult.created))
                 )
                 if let existingMessageIndex = existingMessages.firstIndex(where: { $0.id == partialChatResult.id }) {
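
As the comment in the diff notes, function calls arrive in streamed fragments just like regular content, so the demo accumulates them across deltas. That pattern can be lifted into a small standalone helper. A minimal sketch (hypothetical, not part of this commit), assuming only that each streamed delta may carry optional name and arguments fragments as shown above:

```swift
/// Accumulates a function call that arrives in streamed fragments.
/// Hypothetical helper for illustration; mirrors the logic in ChatStore above.
struct FunctionCallAccumulator {
    private(set) var name = ""
    private(set) var arguments = ""

    /// Feed each delta as it arrives; name and arguments stream independently.
    mutating func accept(nameDelta: String?, argumentsDelta: String?) {
        if let nameDelta = nameDelta {
            name += nameDelta
        }
        if let argumentsDelta = argumentsDelta {
            arguments += argumentsDelta
        }
    }

    /// Meaningful once a choice's finishReason is "function_call":
    /// `name` is then complete and `arguments` is a full JSON object string.
    var summary: String {
        "Function call: name=\(name) arguments=\(arguments)"
    }
}
```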

Demo/DemoChat/Sources/UI/DetailView.swift

Lines changed: 18 additions & 3 deletions

@@ -17,9 +17,9 @@ struct DetailView: View {
     @State var inputText: String = ""
     @FocusState private var isFocused: Bool
     @State private var showsModelSelectionSheet = false
-    @State private var selectedChatModel: Model = .gpt3_5Turbo
+    @State private var selectedChatModel: Model = .gpt4_0613

-    private let availableChatModels: [Model] = [.gpt3_5Turbo, .gpt4]
+    private let availableChatModels: [Model] = [.gpt3_5Turbo0613, .gpt4_0613]

     let conversation: Conversation
     let error: Error?

@@ -237,6 +237,14 @@ struct ChatBubble: View {
                 .foregroundColor(userForegroundColor)
                 .background(userBackgroundColor)
                 .clipShape(RoundedRectangle(cornerRadius: 16, style: .continuous))
+        case .function:
+            Text(message.content)
+                .font(.footnote.monospaced())
+                .padding(.horizontal, 16)
+                .padding(.vertical, 12)
+                .background(assistantBackgroundColor)
+                .clipShape(RoundedRectangle(cornerRadius: 16, style: .continuous))
+            Spacer(minLength: 24)
         case .system:
             EmptyView()
         }

@@ -252,7 +260,14 @@ struct DetailView_Previews: PreviewProvider {
                 messages: [
                     Message(id: "1", role: .assistant, content: "Hello, how can I help you today?", createdAt: Date(timeIntervalSinceReferenceDate: 0)),
                     Message(id: "2", role: .user, content: "I need help with my subscription.", createdAt: Date(timeIntervalSinceReferenceDate: 100)),
-                    Message(id: "3", role: .assistant, content: "Sure, what seems to be the problem with your subscription?", createdAt: Date(timeIntervalSinceReferenceDate: 200))
+                    Message(id: "3", role: .assistant, content: "Sure, what seems to be the problem with your subscription?", createdAt: Date(timeIntervalSinceReferenceDate: 200)),
+                    Message(id: "4", role: .function, content:
+                        """
+                        get_current_weather({
+                          "location": "Glasgow, Scotland",
+                          "format": "celsius"
+                        })
+                        """, createdAt: Date(timeIntervalSinceReferenceDate: 200))
                 ]
             ),
             error: nil,

README.md

Lines changed: 57 additions & 0 deletions

@@ -208,6 +208,8 @@ Using the OpenAI Chat API, you can build your own applications with `gpt-3.5-turbo`
     public let model: Model
     /// The messages to generate chat completions for
     public let messages: [Chat]
+    /// A list of functions the model may generate JSON inputs for.
+    public let functions: [ChatFunctionDeclaration]?
     /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both.
     public let temperature: Double?
     /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.

@@ -318,6 +320,61 @@ for try await result in openAI.chatsStream(query: query) {
 }
 ```

+**Function calls**
+```swift
+let openAI = OpenAI(apiToken: "...")
+// Declare functions which GPT-3 might decide to call.
+let functions = [
+    ChatFunctionDeclaration(
+        name: "get_current_weather",
+        description: "Get the current weather in a given location",
+        parameters:
+            JSONSchema(
+                type: .object,
+                properties: [
+                    "location": .init(type: .string, description: "The city and state, e.g. San Francisco, CA"),
+                    "unit": .init(type: .string, enumValues: ["celsius", "fahrenheit"])
+                ],
+                required: ["location"]
+            )
+    )
+]
+let query = ChatQuery(
+    model: "gpt-3.5-turbo-0613", // 0613 is the earliest version with function calls support.
+    messages: [
+        Chat(role: .user, content: "What's the weather like in Boston?")
+    ],
+    functions: functions
+)
+let result = try await openAI.chats(query: query)
+```
+
+Result will be (serialized as JSON here for readability):
+```json
+{
+  "id": "chatcmpl-1234",
+  "object": "chat.completion",
+  "created": 1686000000,
+  "model": "gpt-3.5-turbo-0613",
+  "choices": [
+    {
+      "index": 0,
+      "message": {
+        "role": "assistant",
+        "function_call": {
+          "name": "get_current_weather",
+          "arguments": "{\n  \"location\": \"Boston, MA\"\n}"
+        }
+      },
+      "finish_reason": "function_call"
+    }
+  ],
+  "usage": { "total_tokens": 100, "completion_tokens": 18, "prompt_tokens": 82 }
+}
+```
+
 Review [Chat Documentation](https://platform.openai.com/docs/guides/chat) for more info.

 ### Images
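
The README example stops at the model's function_call; the commit does not show the follow-up round trip. A minimal sketch of that next step, continuing the example above and reusing its `openAI` and `functions`. It is hypothetical: it assumes the result's message exposes an optional `functionCall` with `name` and `arguments` strings, and that `Chat` accepts a `name` parameter for `.function` messages, per the OpenAI API; the exact initializer shapes are not confirmed by this diff.

```swift
// Decode the arguments JSON the model produced for get_current_weather.
struct WeatherArgs: Decodable {
    let location: String
    let unit: String?
}

if let assistantMessage = result.choices.first?.message,
   let call = assistantMessage.functionCall,           // assumed field name
   call.name == "get_current_weather",
   let data = call.arguments?.data(using: .utf8),      // arguments assumed to be an optional JSON string
   let args = try? JSONDecoder().decode(WeatherArgs.self, from: data) {

    // Stubbed local "function"; a real app would fetch weather for args.location.
    let weatherJSON = #"{"temperature": 22, "unit": "celsius", "description": "Sunny"}"#

    let followUp = ChatQuery(
        model: "gpt-3.5-turbo-0613",
        messages: [
            Chat(role: .user, content: "What's the weather like in Boston?"),
            assistantMessage,                                             // echo the assistant's function_call turn
            Chat(role: .function, content: weatherJSON, name: call.name) // assumed initializer order
        ],
        functions: functions
    )
    // The model now answers in natural language using the function's result.
    let finalResult = try await openAI.chats(query: followUp)
    print(finalResult.choices.first?.message.content ?? "")
}
```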
