diff --git a/.changeset/warm-panthers-cover.md b/.changeset/warm-panthers-cover.md new file mode 100644 index 0000000..643b5bd --- /dev/null +++ b/.changeset/warm-panthers-cover.md @@ -0,0 +1,5 @@ +--- +'function-gpt': minor +--- + +Add API reference documentation diff --git a/README.md b/README.md index db296c0..e8b65dc 100644 --- a/README.md +++ b/README.md @@ -55,6 +55,10 @@ const response = await session.send('count characters in the html content of htt expect(response).toBe('There are 4096 characters in the html content of https://www.google.com/.'); ``` +## API References + +See [API references](./doc/README.md) for more detailed information on how to use the library. + ## Installation ```bash diff --git a/doc/.nojekyll b/doc/.nojekyll new file mode 100644 index 0000000..e2ac661 --- /dev/null +++ b/doc/.nojekyll @@ -0,0 +1 @@ +TypeDoc added this file to prevent GitHub Pages from using Jekyll. You can turn off this behavior by setting the `githubPages` option to false. \ No newline at end of file diff --git a/doc/README.md b/doc/README.md new file mode 100644 index 0000000..3a67ab4 --- /dev/null +++ b/doc/README.md @@ -0,0 +1,187 @@ +function-gpt + +# function-gpt + +## Table of contents + +### Classes + +- [ChatGPTSession](classes/ChatGPTSession.md) + +### Type Aliases + +- [ChatGPTFunctionCall](README.md#chatgptfunctioncall) +- [ChatGPTSendMessageOptions](README.md#chatgptsendmessageoptions) +- [ChatGPTSessionMessage](README.md#chatgptsessionmessage) +- [ChatGPTSessionOptions](README.md#chatgptsessionoptions) + +### Functions + +- [gptFunction](README.md#gptfunction) +- [gptObjectField](README.md#gptobjectfield) + +## Type Aliases + +### ChatGPTFunctionCall + +Ƭ **ChatGPTFunctionCall**: `Object` + +Represents a function call requested by ChatGPT. + +#### Type declaration + +| Name | Type | +| :------ | :------ | +| `arguments` | `string` | +| `name` | `string` | + +#### Defined in + +[src/session.ts:71](https://github.com/atinylittleshell/function-gpt/blob/04eb21b/src/session.ts#L71) + +___ + +### ChatGPTSendMessageOptions + +Ƭ **ChatGPTSendMessageOptions**: `Object` + +Options for the ChatGPTSession.send method. + +**`See`** + +[OpenAI Chat Completion API](https://platform.openai.com/docs/api-reference/chat/create). + +#### Type declaration + +| Name | Type | Description | +| :------ | :------ | :------ | +| `frequency_penalty?` | `number` \| ``null`` | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. **`See`** [See more information about frequency and presence penalties.](https://platform.openai.com/docs/api-reference/parameter-details) | +| `function_call?` | ``"none"`` \| ``"auto"`` \| { `name`: `string` } | Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. | +| `function_call_execute_only?` | `boolean` | Stop the session after executing the function call. Useful when you don't need to give ChatGPT the result of the function call. Defaults to `false`. | +| `logit_bias?` | `Record`<`string`, `number`\> \| ``null`` | Modify the likelihood of specified tokens appearing in the completion. 
Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. | +| `max_tokens?` | `number` | The maximum number of [tokens](/tokenizer) to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens. | +| `model` | `string` | ID of the model to use. **`See`** [model endpoint compatibility](https://platform.openai.com/docs/models/overview) | +| `presence_penalty?` | `number` \| ``null`` | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/api-reference/parameter-details) | +| `stop?` | `string` \| ``null`` \| `string`[] | Up to 4 sequences where the API will stop generating further tokens. | +| `temperature?` | `number` \| ``null`` | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | +| `top_p?` | `number` \| ``null`` | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | +| `user?` | `string` | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. **`See`** [Learn more](https://platform.openai.com/docs/guides/safety-best-practices). | + +#### Defined in + +[src/session.ts:91](https://github.com/atinylittleshell/function-gpt/blob/04eb21b/src/session.ts#L91) + +___ + +### ChatGPTSessionMessage + +Ƭ **ChatGPTSessionMessage**: `Object` + +Represents a message in a ChatGPT session. + +#### Type declaration + +| Name | Type | +| :------ | :------ | +| `content` | `string` \| ``null`` | +| `function_call?` | [`ChatGPTFunctionCall`](README.md#chatgptfunctioncall) | +| `name?` | `string` | +| `role` | ``"system"`` \| ``"user"`` \| ``"assistant"`` \| ``"function"`` | + +#### Defined in + +[src/session.ts:79](https://github.com/atinylittleshell/function-gpt/blob/04eb21b/src/session.ts#L79) + +___ + +### ChatGPTSessionOptions + +Ƭ **ChatGPTSessionOptions**: { `systemMessage?`: `string` } & `ClientOptions` + +Options for the ChatGPTSession constructor. Compatible with the OpenAI node client options. 
+ +**`See`** + +[OpenAI Node Client](https://github.com/openai/openai-node) + +#### Defined in + +[src/session.ts:64](https://github.com/atinylittleshell/function-gpt/blob/04eb21b/src/session.ts#L64) + +## Functions + +### gptFunction + +▸ **gptFunction**(`description`, `inputType`): (`target`: `object`, `propertyKey`: `string`, `descriptor`: `PropertyDescriptor`) => `void` + +Use this decorator on a method within a ChatGPTSession subclass to enable it for function-calling. + +#### Parameters + +| Name | Type | Description | +| :------ | :------ | :------ | +| `description` | `string` | A description of the function. | +| `inputType` | () => `unknown` | Input for the function should be an object instance of a custom class. This parameter specifies the class of the object. | + +#### Returns + +`fn` + +▸ (`target`, `propertyKey`, `descriptor`): `void` + +##### Parameters + +| Name | Type | +| :------ | :------ | +| `target` | `object` | +| `propertyKey` | `string` | +| `descriptor` | `PropertyDescriptor` | + +##### Returns + +`void` + +**`See`** + +[gptObjectField](README.md#gptobjectfield) + +#### Defined in + +[src/decorators.ts:19](https://github.com/atinylittleshell/function-gpt/blob/04eb21b/src/decorators.ts#L19) + +___ + +### gptObjectField + +▸ **gptObjectField**(`type`, `description`, `optional?`): (`target`: `object`, `propertyKey`: `string`) => `void` + +Use this decorator on a property within a custom class to include it as a parameter for function-calling. + +#### Parameters + +| Name | Type | Default value | Description | +| :------ | :------ | :------ | :------ | +| `type` | ``"string"`` \| ``"number"`` \| ``"boolean"`` \| [``"string"`` \| ``"number"`` \| ``"boolean"``] \| [() => `unknown`] \| () => `unknown` | `undefined` | Type of the field. Use `'string'`, `'number'`, `'boolean'` for primitive types. Use `['string']`, `['number']`, `['boolean']` for arrays of primitive types. Use a ClassName for custom types. Use `[ClassName]` for arrays of custom types. | +| `description` | `string` | `undefined` | Description of the field. | +| `optional` | `boolean` | `false` | Whether the field is optional. Default to `false`. | + +#### Returns + +`fn` + +▸ (`target`, `propertyKey`): `void` + +##### Parameters + +| Name | Type | +| :------ | :------ | +| `target` | `object` | +| `propertyKey` | `string` | + +##### Returns + +`void` + +#### Defined in + +[src/decorators.ts:53](https://github.com/atinylittleshell/function-gpt/blob/04eb21b/src/decorators.ts#L53) diff --git a/doc/classes/ChatGPTSession.md b/doc/classes/ChatGPTSession.md new file mode 100644 index 0000000..2f1955a --- /dev/null +++ b/doc/classes/ChatGPTSession.md @@ -0,0 +1,158 @@ +[function-gpt](../README.md) / ChatGPTSession + +# Class: ChatGPTSession + +Extend this class to create your own function-calling enabled ChatGPT session. +Provide functions to the assistant by decorating them with the `@gptFunction` decorator. 
+ +**`See`** + +[gptFunction](../README.md#gptfunction) + +## Table of contents + +### Constructors + +- [constructor](ChatGPTSession.md#constructor) + +### Properties + +- [metadata](ChatGPTSession.md#metadata) +- [openai](ChatGPTSession.md#openai) +- [options](ChatGPTSession.md#options) +- [sessionMessages](ChatGPTSession.md#sessionmessages) + +### Accessors + +- [messages](ChatGPTSession.md#messages) + +### Methods + +- [processAssistantMessage](ChatGPTSession.md#processassistantmessage) +- [send](ChatGPTSession.md#send) + +## Constructors + +### constructor + +• **new ChatGPTSession**(`options?`) + +#### Parameters + +| Name | Type | Description | +| :------ | :------ | :------ | +| `options` | [`ChatGPTSessionOptions`](../README.md#chatgptsessionoptions) | Options for the ChatGPTSession constructor. | + +**`See`** + +[ChatGPTSessionOptions](../README.md#chatgptsessionoptions) + +#### Defined in + +[src/session.ts:204](https://github.com/atinylittleshell/function-gpt/blob/04eb21b/src/session.ts#L204) + +## Properties + +### metadata + +• `Private` `Readonly` **metadata**: `GPTClientMetadata` + +#### Defined in + +[src/session.ts:196](https://github.com/atinylittleshell/function-gpt/blob/04eb21b/src/session.ts#L196) + +___ + +### openai + +• `Readonly` **openai**: `OpenAI` + +#### Defined in + +[src/session.ts:195](https://github.com/atinylittleshell/function-gpt/blob/04eb21b/src/session.ts#L195) + +___ + +### options + +• `Private` `Readonly` **options**: [`ChatGPTSessionOptions`](../README.md#chatgptsessionoptions) = `{}` + +Options for the ChatGPTSession constructor. + +#### Defined in + +[src/session.ts:204](https://github.com/atinylittleshell/function-gpt/blob/04eb21b/src/session.ts#L204) + +___ + +### sessionMessages + +• `Private` **sessionMessages**: [`ChatGPTSessionMessage`](../README.md#chatgptsessionmessage)[] = `[]` + +#### Defined in + +[src/session.ts:197](https://github.com/atinylittleshell/function-gpt/blob/04eb21b/src/session.ts#L197) + +## Accessors + +### messages + +• `get` **messages**(): [`ChatGPTSessionMessage`](../README.md#chatgptsessionmessage)[] + +#### Returns + +[`ChatGPTSessionMessage`](../README.md#chatgptsessionmessage)[] + +The messages sent to and from the assistant so far. + +#### Defined in + +[src/session.ts:254](https://github.com/atinylittleshell/function-gpt/blob/04eb21b/src/session.ts#L254) + +## Methods + +### processAssistantMessage + +▸ `Private` **processAssistantMessage**(`message`, `options`): `Promise`<`string`\> + +#### Parameters + +| Name | Type | +| :------ | :------ | +| `message` | [`ChatGPTSessionMessage`](../README.md#chatgptsessionmessage) | +| `options` | [`ChatGPTSendMessageOptions`](../README.md#chatgptsendmessageoptions) | + +#### Returns + +`Promise`<`string`\> + +#### Defined in + +[src/session.ts:258](https://github.com/atinylittleshell/function-gpt/blob/04eb21b/src/session.ts#L258) + +___ + +### send + +▸ **send**(`message`, `options?`): `Promise`<`string`\> + +#### Parameters + +| Name | Type | Description | +| :------ | :------ | :------ | +| `message` | `string` | The user message to send to the assistant. | +| `options` | [`ChatGPTSendMessageOptions`](../README.md#chatgptsendmessageoptions) | Options for the ChatGPTSession.send method. | + +#### Returns + +`Promise`<`string`\> + +The assistant's response. 
+ +**`See`** + +[ChatGPTSendMessageOptions](../README.md#chatgptsendmessageoptions) + +#### Defined in + +[src/session.ts:221](https://github.com/atinylittleshell/function-gpt/blob/04eb21b/src/session.ts#L221) diff --git a/package.json b/package.json index 4e3de64..287f686 100644 --- a/package.json +++ b/package.json @@ -38,6 +38,7 @@ "node": ">=16.0.0" }, "scripts": { + "doc": "shx rm -rf doc && typedoc", "build": "tsup", "lint": "eslint . --ext .ts", "test": "vitest run --coverage", @@ -61,7 +62,10 @@ "eslint-plugin-prettier": "^5.0.0", "eslint-plugin-simple-import-sort": "^10.0.0", "prettier": "^3.0.2", + "shx": "^0.3.4", "tsup": "^7.2.0", + "typedoc": "^0.24.8", + "typedoc-plugin-markdown": "^3.15.4", "typescript": "^5.1.6", "vitest": "^0.34.1" }, diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index f19eb24..2f7d4d5 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -49,9 +49,18 @@ devDependencies: prettier: specifier: ^3.0.2 version: 3.0.2 + shx: + specifier: ^0.3.4 + version: 0.3.4 tsup: specifier: ^7.2.0 version: 7.2.0(typescript@5.1.6) + typedoc: + specifier: ^0.24.8 + version: 0.24.8(typescript@5.1.6) + typedoc-plugin-markdown: + specifier: ^3.15.4 + version: 3.15.4(typedoc@0.24.8) typescript: specifier: ^5.1.6 version: 5.1.6 @@ -949,6 +958,10 @@ packages: engines: {node: '>=8'} dev: true + /ansi-sequence-parser@1.1.1: + resolution: {integrity: sha512-vJXt3yiaUL4UU546s3rPXlsry/RnM730G1+HkpKE012AN0sx1eOrxSu95oKDIonskeLTijMgqWZ3uDEe3NFvyg==} + dev: true + /ansi-styles@3.2.1: resolution: {integrity: sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==} engines: {node: '>=4'} @@ -1113,6 +1126,12 @@ packages: concat-map: 0.0.1 dev: true + /brace-expansion@2.0.1: + resolution: {integrity: sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==} + dependencies: + balanced-match: 1.0.2 + dev: true + /braces@3.0.2: resolution: {integrity: sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==} engines: {node: '>=8'} @@ -2117,6 +2136,19 @@ packages: resolution: {integrity: sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==} dev: true + /handlebars@4.7.8: + resolution: {integrity: sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ==} + engines: {node: '>=0.4.7'} + hasBin: true + dependencies: + minimist: 1.2.8 + neo-async: 2.6.2 + source-map: 0.6.1 + wordwrap: 1.0.0 + optionalDependencies: + uglify-js: 3.17.4 + dev: true + /hard-rejection@2.1.0: resolution: {integrity: sha512-VIZB+ibDhx7ObhAe7OVtoEbuP4h/MuOTHJ+J8h/eBXotJYl0fBgR72xDFCKgIh22OJZIOVNxBMWuhAr10r8HdA==} engines: {node: '>=6'} @@ -2244,6 +2276,11 @@ packages: side-channel: 1.0.4 dev: true + /interpret@1.4.0: + resolution: {integrity: sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==} + engines: {node: '>= 0.10'} + dev: true + /is-array-buffer@3.0.2: resolution: {integrity: sha512-y+FyyR/w8vfIRq4eQcM1EYgSTnmHXPqaF+IgzgraytCFq5Xh8lllDVmAZolPJiZttZLeFSINPYMaEJ7/vWUa1w==} dependencies: @@ -2627,6 +2664,10 @@ packages: yallist: 4.0.0 dev: true + /lunr@2.3.9: + resolution: {integrity: sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow==} + dev: true + /magic-string@0.30.2: resolution: {integrity: sha512-lNZdu7pewtq/ZvWUp9Wpf/x7WzMTsR26TWV03BRZrXFsv+BI6dy8RAiKgm1uM/kyR0rCfUcqvOlXKG66KhIGug==} engines: {node: '>=12'} @@ -2651,6 +2692,12 @@ 
packages: engines: {node: '>=8'} dev: true + /marked@4.3.0: + resolution: {integrity: sha512-PRsaiG84bK+AMvxziE/lCFss8juXjNaWzVbN5tXAm4XjeaS9NAHhop+PjQxz2A9h8Q4M/xGmzP8vqNwy6JeK0A==} + engines: {node: '>= 12'} + hasBin: true + dev: true + /md5@2.3.0: resolution: {integrity: sha512-T1GITYmFaKuO91vxyoQMFETst+O71VUPEU3ze5GNzDm0OWdP8v1ziTaAEPUr/3kLsY3Sftgz242A1SetQiDL7g==} dependencies: @@ -2726,6 +2773,13 @@ packages: brace-expansion: 1.1.11 dev: true + /minimatch@9.0.3: + resolution: {integrity: sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==} + engines: {node: '>=16 || 14 >=14.17'} + dependencies: + brace-expansion: 2.0.1 + dev: true + /minimist-options@4.1.0: resolution: {integrity: sha512-Q4r8ghd80yhO/0j1O3B2BjweX3fiHg9cdOwjJd2J76Q135c+NDxGCqdYKQ1SKBuFfgWbAUzBfvYjPUEeNgqN1A==} engines: {node: '>= 6'} @@ -2774,6 +2828,10 @@ packages: resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} dev: true + /neo-async@2.6.2: + resolution: {integrity: sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==} + dev: true + /node-domexception@1.0.0: resolution: {integrity: sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==} engines: {node: '>=10.5.0'} @@ -3203,6 +3261,13 @@ packages: picomatch: 2.3.1 dev: true + /rechoir@0.6.2: + resolution: {integrity: sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==} + engines: {node: '>= 0.10'} + dependencies: + resolve: 1.22.4 + dev: true + /redent@3.0.0: resolution: {integrity: sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==} engines: {node: '>=8'} @@ -3353,6 +3418,34 @@ packages: engines: {node: '>=8'} dev: true + /shelljs@0.8.5: + resolution: {integrity: sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==} + engines: {node: '>=4'} + hasBin: true + dependencies: + glob: 7.1.6 + interpret: 1.4.0 + rechoir: 0.6.2 + dev: true + + /shiki@0.14.3: + resolution: {integrity: sha512-U3S/a+b0KS+UkTyMjoNojvTgrBHjgp7L6ovhFVZsXmBGnVdQ4K4U9oK0z63w538S91ATngv1vXigHCSWOwnr+g==} + dependencies: + ansi-sequence-parser: 1.1.1 + jsonc-parser: 3.2.0 + vscode-oniguruma: 1.7.0 + vscode-textmate: 8.0.0 + dev: true + + /shx@0.3.4: + resolution: {integrity: sha512-N6A9MLVqjxZYcVn8hLmtneQWIJtp8IKzMP4eMnx+nqkvXoqinUPCbUFLp2UcWTEIUONhlk0ewxr/jaVGlc+J+g==} + engines: {node: '>=6'} + hasBin: true + dependencies: + minimist: 1.2.8 + shelljs: 0.8.5 + dev: true + /side-channel@1.0.4: resolution: {integrity: sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==} dependencies: @@ -3796,6 +3889,29 @@ packages: is-typed-array: 1.1.12 dev: true + /typedoc-plugin-markdown@3.15.4(typedoc@0.24.8): + resolution: {integrity: sha512-KpjFL/NDrQAbY147oIoOgob2vAdEchsMcTVd6+e6H2lC1l5xhi48bhP/fMJI7qYQ8th5nubervgqw51z7gY66A==} + peerDependencies: + typedoc: '>=0.24.0' + dependencies: + handlebars: 4.7.8 + typedoc: 0.24.8(typescript@5.1.6) + dev: true + + /typedoc@0.24.8(typescript@5.1.6): + resolution: {integrity: sha512-ahJ6Cpcvxwaxfu4KtjA8qZNqS43wYt6JL27wYiIgl1vd38WW/KWX11YuAeZhuz9v+ttrutSsgK+XO1CjL1kA3w==} + engines: {node: '>= 14.14'} + hasBin: true + peerDependencies: + typescript: 4.6.x || 4.7.x || 4.8.x || 4.9.x || 5.0.x || 5.1.x + dependencies: + lunr: 2.3.9 + marked: 4.3.0 + minimatch: 9.0.3 + shiki: 0.14.3 + typescript: 5.1.6 + 
dev: true + /typescript@5.1.6: resolution: {integrity: sha512-zaWCozRZ6DLEWAWFrVDz1H6FVXzUSfTy5FUMWsQlU8Ym5JP9eO4xkTIROFCQvhQf61z6O/G6ugw3SgAnvvm+HA==} engines: {node: '>=14.17'} @@ -3806,6 +3922,14 @@ packages: resolution: {integrity: sha512-RsPyTbqORDNDxqAdQPQBpgqhWle1VcTSou/FraClYlHf6TZnQcGslpLcAphNR+sQW4q5lLWLbOsRlh9j24baQg==} dev: true + /uglify-js@3.17.4: + resolution: {integrity: sha512-T9q82TJI9e/C1TAxYvfb16xO120tMVFZrGA3f9/P4424DNu6ypK103y0GPFVa17yotwSyZW5iYXgjYHkGrJW/g==} + engines: {node: '>=0.8.0'} + hasBin: true + requiresBuild: true + dev: true + optional: true + /unbox-primitive@1.0.2: resolution: {integrity: sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==} dependencies: @@ -3970,6 +4094,14 @@ packages: - terser dev: true + /vscode-oniguruma@1.7.0: + resolution: {integrity: sha512-L9WMGRfrjOhgHSdOYgCt/yRMsXzLDJSL7BPrOZt73gU0iWO4mpqzqQzOz5srxqTvMBaR0XZTSrVWo4j55Rc6cA==} + dev: true + + /vscode-textmate@8.0.0: + resolution: {integrity: sha512-AFbieoL7a5LMqcnOF04ji+rpXadgOXnZsxQr//r83kLPr7biP7am3g9zbaZIaBGwBRWeSvoMD4mgPdX3e4NWBg==} + dev: true + /wcwidth@1.0.1: resolution: {integrity: sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg==} dependencies: @@ -4061,6 +4193,10 @@ packages: stackback: 0.0.2 dev: true + /wordwrap@1.0.0: + resolution: {integrity: sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==} + dev: true + /wrap-ansi@6.2.0: resolution: {integrity: sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==} engines: {node: '>=8'} diff --git a/src/decorators.ts b/src/decorators.ts index f3d8eba..58310a6 100644 --- a/src/decorators.ts +++ b/src/decorators.ts @@ -7,6 +7,15 @@ import { } from './internals.js'; import { ChatGPTSession } from './session.js'; +/** + * Use this decorator on a method within a ChatGPTSession subclass to enable it for function-calling. + * + * @param description - A description of the function. + * @param inputType - Input for the function should be an object instance of a custom class. + * This parameter specifies the class of the object. + * + * @see {@link gptObjectField} + */ export function gptFunction(description: string, inputType: new () => unknown) { return function (target: object, propertyKey: string, descriptor: PropertyDescriptor) { const ctor = target.constructor as new () => ChatGPTSession; @@ -30,6 +39,17 @@ export function gptFunction(description: string, inputType: new () => unknown) { }; } +/** + * Use this decorator on a property within a custom class to include it as a parameter for function-calling. + * + * @param type - Type of the field. + * Use `'string'`, `'number'`, `'boolean'` for primitive types. + * Use `['string']`, `['number']`, `['boolean']` for arrays of primitive types. + * Use a ClassName for custom types. + * Use `[ClassName]` for arrays of custom types. + * @param description - Description of the field. + * @param optional - Whether the field is optional. Default to `false`. + */ export function gptObjectField( type: 'string' | 'number' | 'boolean' | ['string' | 'number' | 'boolean'] | (new () => unknown) | [new () => unknown], description: string, diff --git a/src/session.ts b/src/session.ts index 7634ac8..42014c3 100644 --- a/src/session.ts +++ b/src/session.ts @@ -56,16 +56,26 @@ const describeField = (description: string | null, fieldType: GPTTypeMetadata) = return result; }; +/** + * Options for the ChatGPTSession constructor. 
Compatible with the OpenAI node client options. + * + * @see [OpenAI Node Client](https://github.com/openai/openai-node) + */ export type ChatGPTSessionOptions = { systemMessage?: string; - model?: string; } & ClientOptions; +/** + * Represents a function call requested by ChatGPT. + */ export type ChatGPTFunctionCall = { arguments: string; name: string; }; +/** + * Represents a message in a ChatGPT session. + */ export type ChatGPTSessionMessage = { role: 'system' | 'user' | 'assistant' | 'function'; name?: string; @@ -73,6 +83,11 @@ export type ChatGPTSessionMessage = { function_call?: ChatGPTFunctionCall; }; +/** + * Options for the ChatGPTSession.send method. + * + * @see [OpenAI Chat Completion API](https://platform.openai.com/docs/api-reference/chat/create). + */ export type ChatGPTSendMessageOptions = { /** * Stop the session after executing the function call. @@ -82,9 +97,9 @@ export type ChatGPTSendMessageOptions = { function_call_execute_only?: boolean; /** - * ID of the model to use. See the - * [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table - * for details on which models work with the Chat API. + * ID of the model to use. + * + * @see [model endpoint compatibility](https://platform.openai.com/docs/models/overview) */ model: string; @@ -93,7 +108,7 @@ export type ChatGPTSendMessageOptions = { * existing frequency in the text so far, decreasing the model's likelihood to * repeat the same line verbatim. * - * [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) + * @see [See more information about frequency and presence penalties.](https://platform.openai.com/docs/api-reference/parameter-details) */ frequency_penalty?: number | null; @@ -134,7 +149,7 @@ export type ChatGPTSendMessageOptions = { * whether they appear in the text so far, increasing the model's likelihood to * talk about new topics. * - * [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) + * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/api-reference/parameter-details) */ presence_penalty?: number | null; @@ -163,16 +178,29 @@ export type ChatGPTSendMessageOptions = { /** * A unique identifier representing your end-user, which can help OpenAI to monitor - * and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + * and detect abuse. + * + * @see [Learn more](https://platform.openai.com/docs/guides/safety-best-practices). */ user?: string; }; +/** + * Extend this class to create your own function-calling enabled ChatGPT session. + * Provide functions to the assistant by decorating them with the `@gptFunction` decorator. + * + * @see {@link gptFunction} + */ export class ChatGPTSession { public readonly openai: OpenAI; private readonly metadata: GPTClientMetadata; private sessionMessages: ChatGPTSessionMessage[] = []; + /** + * @param options - Options for the ChatGPTSession constructor. + * + * @see {@link ChatGPTSessionOptions} + */ constructor(private readonly options: ChatGPTSessionOptions = {}) { this.openai = new OpenAI(options); @@ -183,6 +211,13 @@ export class ChatGPTSession { this.metadata = metadata; } + /** + * @param message - The user message to send to the assistant. + * @param options - Options for the ChatGPTSession.send method. + * @returns The assistant's response. 
+ * + * @see {@link ChatGPTSendMessageOptions} + */ public async send( message: string, options: ChatGPTSendMessageOptions = { @@ -213,6 +248,9 @@ export class ChatGPTSession { return await this.processAssistantMessage(response.choices[0].message, options); } + /** + * @returns The messages sent to and from the assistant so far. + */ get messages(): ChatGPTSessionMessage[] { return this.sessionMessages; } @@ -268,6 +306,9 @@ export class ChatGPTSession { return message.content!; } + /** + * @ignore + */ public getFunctionSchema() { const schema = Object.values(this.metadata.functions).map((f) => ({ name: f.name, diff --git a/tsconfig.json b/tsconfig.json index 46c7c09..f1b9d5a 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -24,5 +24,11 @@ "exclude": [ "dist", "node_modules" - ] + ], + "typedocOptions": { + "entryPoints": ["index.ts"], + "out": "doc", + "readme": "none", + "plugin": ["typedoc-plugin-markdown"] + } }
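
---

For quick orientation, below is a minimal usage sketch of the API that the generated reference in this change documents. It is not part of the diff: the `BrowseSession` class, the `FetchUrlInput` type, the `url` field, and the model id are illustrative assumptions; only the `ChatGPTSession`, `@gptFunction`, and `@gptObjectField` names come from the documented API.

```typescript
import { ChatGPTSession, gptFunction, gptObjectField } from 'function-gpt';

// Input for a GPT-callable function is an instance of a custom class whose
// fields are described with @gptObjectField (type, description, optional).
class FetchUrlInput {
  @gptObjectField('string', 'URL of the web page to fetch')
  public url!: string;
}

// Subclass ChatGPTSession and decorate a method with @gptFunction
// (description, input class) to expose it for function-calling.
class BrowseSession extends ChatGPTSession {
  @gptFunction('fetch the html content of a web page', FetchUrlInput)
  async fetchUrl(input: FetchUrlInput): Promise<string> {
    const response = await fetch(input.url);
    return await response.text();
  }
}

// ChatGPTSessionOptions accepts the OpenAI node client options (e.g. apiKey)
// plus an optional systemMessage.
const session = new BrowseSession({ apiKey: process.env.OPENAI_API_KEY });

// send() takes the user message plus ChatGPTSendMessageOptions; the model id
// here is only an example.
const answer = await session.send(
  'count characters in the html content of https://www.google.com/',
  { model: 'gpt-3.5-turbo' },
);
console.log(answer);
```

Per the `ChatGPTSendMessageOptions` reference above, passing `function_call_execute_only: true` to `send()` would stop the session after `fetchUrl` runs instead of feeding its result back to the model.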