diff --git a/.changeset/new-knives-thank.md b/.changeset/new-knives-thank.md new file mode 100644 index 0000000..8619c95 --- /dev/null +++ b/.changeset/new-knives-thank.md @@ -0,0 +1,5 @@ +--- +'function-gpt': minor +--- + +support execute_only mode diff --git a/src/session.ts b/src/session.ts index 1be6b53..d4792dc 100644 --- a/src/session.ts +++ b/src/session.ts @@ -74,12 +74,20 @@ export type ChatGPTSessionMessage = { }; export type ChatGPTSendMessageOptions = { + /** + * Stop the session after executing the function call. + * Useful when you don't need to give ChatGPT the result of the function call. + * Defaults to `false`. + */ + function_call_execute_only?: boolean; + /** * ID of the model to use. See the * [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table * for details on which models work with the Chat API. */ model: string; + /** * Number between -2.0 and 2.0. Positive values penalize new tokens based on their * existing frequency in the text so far, decreasing the model's likelihood to @@ -88,6 +96,7 @@ export type ChatGPTSendMessageOptions = { * [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) */ frequency_penalty?: number | null; + /** * Controls how the model responds to function calls. "none" means the model does * not call a function, and responds to the end-user. "auto" means the model can @@ -97,6 +106,7 @@ export type ChatGPTSendMessageOptions = { * are present. */ function_call?: 'none' | 'auto' | { name: string }; + /** * Modify the likelihood of specified tokens appearing in the completion. * @@ -108,6 +118,7 @@ export type ChatGPTSendMessageOptions = { * or exclusive selection of the relevant token. */ logit_bias?: Record<string, number> | null; + /** * The maximum number of [tokens](/tokenizer) to generate in the chat completion. * @@ -117,6 +128,7 @@ export type ChatGPTSendMessageOptions = { * for counting tokens. */ max_tokens?: number; + /** * Number between -2.0 and 2.0. 
Positive values penalize new tokens based on * whether they appear in the text so far, increasing the model's likelihood to @@ -125,10 +137,12 @@ export type ChatGPTSendMessageOptions = { * [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) */ presence_penalty?: number | null; + /** * Up to 4 sequences where the API will stop generating further tokens. */ stop?: string | null | Array<string>; + /** * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will * make the output more random, while lower values like 0.2 will make it more @@ -232,6 +246,10 @@ export class ChatGPTSession { content: JSON.stringify(resultValue), }); + if (options.function_call_execute_only) { + return ''; + } + const response = await this.openai.chat.completions.create({ ...options, messages: this.sessionMessages, diff --git a/tests/session.test.ts b/tests/session.test.ts index 8fd47b3..7b80e46 100644 --- a/tests/session.test.ts +++ b/tests/session.test.ts @@ -66,12 +66,6 @@ test('function calling should work', async () => { } class BrowseSession extends ChatGPTSession { - constructor() { - super({ - apiKey: '', - }); - } - @gptFunction('browse a web page and return its html content', BrowseParams) async browse(params: BrowseParams) { await fetch(params.url); @@ -86,3 +80,29 @@ test('function calling should work', async () => { expect(fetch).toHaveBeenCalledTimes(1); expect(response).toEqual('this is a test response'); }); + +test('execute_only mode should work', async () => { + class BrowseParams { + @gptObjectField('string', 'url of the web page to browse', true) + public url: string = ''; + } + + class BrowseSession extends ChatGPTSession { + @gptFunction('browse a web page and return its html content', BrowseParams) + async browse(params: BrowseParams) { + await fetch(params.url); + } + } + + const session = new BrowseSession(); + const response = await session.send('this is a test message', { + model: 'gpt-3.5-turbo', 
function_call_execute_only: true, + }); + + expect(OpenAI).toHaveBeenCalledTimes(1); + expect(session.openai.chat.completions.create).toHaveBeenCalledTimes(1); + expect(fetch).toHaveBeenCalledTimes(1); + expect(session.messages.length).toEqual(3); + expect(response).toEqual(''); +});