Merge pull request #11 from atinylittleshell/features/execute_only
support execute only mode
atinylittleshell committed Aug 20, 2023
2 parents 7798de1 + 5972072 commit 571bc13
Showing 3 changed files with 49 additions and 6 deletions.
5 changes: 5 additions & 0 deletions .changeset/new-knives-thank.md
@@ -0,0 +1,5 @@
---
'function-gpt': minor
---

support execute_only mode
18 changes: 18 additions & 0 deletions src/session.ts
@@ -74,12 +74,20 @@ export type ChatGPTSessionMessage = {
};

export type ChatGPTSendMessageOptions = {
/**
* Stop the session after executing the function call.
* Useful when you don't need to give ChatGPT the result of the function call.
* Defaults to `false`.
*/
function_call_execute_only?: boolean;

/**
* ID of the model to use. See the
* [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table
* for details on which models work with the Chat API.
*/
model: string;

/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their
* existing frequency in the text so far, decreasing the model's likelihood to
@@ -88,6 +96,7 @@ export type ChatGPTSendMessageOptions = {
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
*/
frequency_penalty?: number | null;

/**
* Controls how the model responds to function calls. "none" means the model does
* not call a function, and responds to the end-user. "auto" means the model can
@@ -97,6 +106,7 @@ export type ChatGPTSendMessageOptions = {
* are present.
*/
function_call?: 'none' | 'auto' | { name: string };

/**
* Modify the likelihood of specified tokens appearing in the completion.
*
@@ -108,6 +118,7 @@ export type ChatGPTSendMessageOptions = {
* or exclusive selection of the relevant token.
*/
logit_bias?: Record<string, number> | null;

/**
* The maximum number of [tokens](/tokenizer) to generate in the chat completion.
*
@@ -117,6 +128,7 @@ export type ChatGPTSendMessageOptions = {
* for counting tokens.
*/
max_tokens?: number;

/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on
* whether they appear in the text so far, increasing the model's likelihood to
@@ -125,10 +137,12 @@ export type ChatGPTSendMessageOptions = {
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
*/
presence_penalty?: number | null;

/**
* Up to 4 sequences where the API will stop generating further tokens.
*/
stop?: string | null | Array<string>;

/**
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
* make the output more random, while lower values like 0.2 will make it more
@@ -232,6 +246,10 @@ export class ChatGPTSession {
content: JSON.stringify(resultValue),
});

if (options.function_call_execute_only) {
return '';
}

const response = await this.openai.chat.completions.create({
...options,
messages: this.sessionMessages,
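For context, here is a minimal usage sketch of the new option, assembled from the API surface visible in this diff (ChatGPTSession, the gptFunction and gptObjectField decorators, and the send options). The import path, the constructor options, and the LoggingSession/LogParams names are illustrative assumptions, not part of the commit.

import { ChatGPTSession, gptFunction, gptObjectField } from 'function-gpt'; // import path assumed from the package name

// Parameter schema for the exposed function, described via decorators.
class LogParams {
  @gptObjectField('string', 'text to append to the application log', true)
  public text: string = '';
}

class LoggingSession extends ChatGPTSession {
  // Methods decorated with @gptFunction are exposed to ChatGPT as callable functions.
  @gptFunction('append a line of text to the application log', LogParams)
  async log(params: LogParams) {
    console.log(params.text);
  }
}

async function main() {
  // Constructor options (apiKey) are assumed from the test setup removed in this commit.
  const session = new LoggingSession({ apiKey: process.env.OPENAI_API_KEY ?? '' });

  // With function_call_execute_only set, the session executes the function call
  // and resolves with an empty string instead of sending the function result
  // back to ChatGPT for a follow-up completion.
  const reply = await session.send('note that the nightly job finished', {
    model: 'gpt-3.5-turbo',
    function_call_execute_only: true,
  });

  console.log(reply === ''); // true when a function call was handled in execute-only mode
}

main();

In this mode the second chat.completions.create call is skipped, which is what the new test below verifies.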
32 changes: 26 additions & 6 deletions tests/session.test.ts
@@ -66,12 +66,6 @@ test('function calling should work', async () => {
}

class BrowseSession extends ChatGPTSession {
constructor() {
super({
apiKey: '<MY_OPENAI_API_KEY>',
});
}

@gptFunction('browse a web page and return its html content', BrowseParams)
async browse(params: BrowseParams) {
await fetch(params.url);
@@ -86,3 +80,29 @@ test('function calling should work', async () => {
expect(fetch).toHaveBeenCalledTimes(1);
expect(response).toEqual('this is a test response');
});

test('execute_only mode should work', async () => {
class BrowseParams {
@gptObjectField('string', 'url of the web page to browse', true)
public url: string = '';
}

class BrowseSession extends ChatGPTSession {
@gptFunction('browse a web page and return its html content', BrowseParams)
async browse(params: BrowseParams) {
await fetch(params.url);
}
}

const session = new BrowseSession();
const response = await session.send('this is a test message', {
model: 'gpt-3.5-turbo',
function_call_execute_only: true,
});

expect(OpenAI).toHaveBeenCalledTimes(1);
expect(session.openai.chat.completions.create).toHaveBeenCalledTimes(1);
expect(fetch).toHaveBeenCalledTimes(1);
expect(session.messages.length).toEqual(3);
expect(response).toEqual('');
});
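For reference, the three messages asserted above are presumably the user message, the assistant response containing the function_call, and the function-result message that send appends before returning early; because function_call_execute_only skips the follow-up completion, only one chat.completions.create call is made and send resolves to an empty string.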
