diff --git a/README.md b/README.md
index 360c322..37ae2c0 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,7 @@
 # 简介
-Tars 是一个 Obsidian 插件,支持 Kimi、豆包、阿里千问、百度千帆、智谱 等等中文大型语言模型(LLMs)基于标签建议进行文本生成。Tars 这个名字来源于电影《星际穿越》中的机器人 Tars。
+Tars 是一个 Obsidian 插件,基于标签建议进行文本生成,支持 Claude、OpenAI、Kimi、豆包、阿里千问、智谱、深度求索、百度千帆等。Tars 这个名字来源于电影《星际穿越》中的机器人 Tars。
 ## 特性
@@ -23,9 +23,10 @@ Tars 是一个 Obsidian 插件,支持 Kimi、豆包、阿里千问、百度千
 ## AI 服务提供商
+- [Claude](https://claude.ai)
+- [OpenAI](https://platform.openai.com/api-keys)
 - [Kimi](https://www.moonshot.cn)
 - [Doubao 豆包](https://www.volcengine.com/product/doubao)
-- [OpenAI](https://platform.openai.com/api-keys)
 - [Qianfan 百度千帆](https://qianfan.cloud.baidu.com)
 - [Qwen 阿里千问](https://dashscope.console.aliyun.com)
 - [Zhipu 智谱](https://open.bigmodel.cn/)
@@ -35,9 +36,15 @@
 ## 如何使用
-在设置页面添加一个 AI 助手,设置 API 密钥,然后在编辑器中使用相应的标签来触发 AI 助手。
+在设置页面添加一个 AI 助手,设置 API 密钥,然后在编辑器中使用相应的标签来触发 AI 助手。通过对话形式来触发:先有用户消息,然后才能触发 AI 助手回答问题。
+
+```text
+#我 : 1+1=?(用户消息)
+(隔开一个空行)
+#Claude : (触发)
+```
-如果在设置页面的 AI 助手中没有你想要的 model 类型,服务器地址需要中转,可以在设置中的“覆盖输入参数”进行配置,输入 JSON 格式,例如 `{"model":"你想要的model", "baseURL": "中转地址"}`。
+如果在设置页面的 AI 助手中没有你想要的 model 类型,或者服务器地址需要自定义,可以在设置中的“覆盖输入参数”进行配置,以 JSON 格式输入,例如 `{"model":"你想要的model", "baseURL": "自定义地址"}`。
 ## 对话语法
diff --git a/README_en.md b/README_en.md
index 595023e..7fb72ed 100644
--- a/README_en.md
+++ b/README_en.md
@@ -7,7 +7,7 @@
 # Introduction
-Tars is an Obsidian plugin that supports text generation by Kimi, Doubao, Ali Qianwen, Baidu Qianfan, Zhipu, and other Chinese large language models (LLMs) based on tag suggestions. The name Tars comes from the robot Tars in the movie "Interstellar".
+Tars is an Obsidian plugin that supports text generation based on tag suggestions, using services like Claude, OpenAI, Kimi, Doubao, Qwen, Zhipu, DeepSeek, QianFan & more. The name Tars comes from the robot Tars in the movie "Interstellar".
 ## Features
@@ -23,9 +23,10 @@ Tars is an Obsidian plugin that supports text generation by Kimi, Doubao, Ali Qi
 ## AI providers
+- [Claude](https://claude.ai)
+- [OpenAI](https://platform.openai.com/api-keys)
 - [Kimi](https://www.moonshot.cn)
 - [Doubao](https://www.volcengine.com/product/doubao)
-- [OpenAI](https://platform.openai.com/api-keys)
 - [Qianfan](https://qianfan.cloud.baidu.com)
 - [Qwen](https://dashscope.console.aliyun.com)
 - [Zhipu](https://open.bigmodel.cn/)
@@ -35,9 +36,15 @@ If the AI provider you want is not in the list above, you can propose a specific
 ## How to use
-Add an AI assistant in the settings page, set the API key, and then use the corresponding tag in the editor to trigger the AI assistant.
+Add an AI assistant in the settings page, set the API key, and then use the corresponding tag in the editor to trigger the AI assistant. Generation works as a conversation: a user message comes first, and only then can the AI assistant tag be triggered to answer.
+
+```text
+#User : 1+1=?(user message)
+(blank line)
+#Claude : (trigger)
+```
-If the model type you want is not in the AI assistant on the settings page, you can configure it in the "Override input parameters" in the settings, input JSON format, for example `{"model":"your desired model"}`.
+If the model you want is not available for the AI assistant on the settings page, or the server address needs to be customized, you can configure this under "Override input parameters" in the settings. Enter JSON, for example `{"model":"your model", "baseURL": "your url"}`.
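For reference, the providers apply these overrides by spreading the parsed `parameters` object after the saved options, so any key in the JSON wins. Below is a minimal sketch of that merge, using a simplified stand-in for the plugin's `BaseOptions` (field names follow `src/providers/index.ts`; the example values are illustrative, not prescriptive):

```typescript
// Simplified sketch of the override merge the providers perform
// (see the `{ ...optionsExcludingParams, ...parameters }` spread in src/providers/claude.ts).
interface BaseOptionsSketch {
	apiKey: string
	baseURL: string
	model: string
	parameters: Record<string, unknown> // parsed from the "Override input parameters" JSON
}

const resolveOptions = (settings: BaseOptionsSketch) => {
	const { parameters, ...optionsExcludingParams } = settings
	// `parameters` is spread last, so e.g. {"model": "your model", "baseURL": "your url"}
	// overrides both the saved model and the saved baseURL.
	return { ...optionsExcludingParams, ...parameters }
}
```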
## Conversations syntax diff --git a/manifest.json b/manifest.json index 5ad3648..f9c450e 100644 --- a/manifest.json +++ b/manifest.json @@ -1,9 +1,9 @@ { "id": "tars", "name": "Tars", - "version": "0.3.1", + "version": "0.4.0", "minAppVersion": "1.5.8", - "description": "Use Kimi and other Chinese LLMs for text generation based on tag suggestions.", + "description": "Text generation based on tag suggestions, using Claude, OpenAI, Kimi, Doubao, Qwen, Zhipu, DeepSeek, QianFan & more.", "author": "Tarslab", "authorUrl": "https://github.com/tarslab", "isDesktopOnly": true diff --git a/package-lock.json b/package-lock.json index b03833e..6064fb6 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,20 +1,20 @@ { "name": "obsidian-tars", - "version": "0.3.0", + "version": "0.4.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "obsidian-tars", - "version": "0.3.0", + "version": "0.4.0", "license": "MIT", "devDependencies": { - "@anthropic-ai/sdk": "^0.25.1", "@types/node": "^16.18.101", "@typescript-eslint/eslint-plugin": "^6.19.1", "@typescript-eslint/parser": "^6.19.1", "builtin-modules": "3.3.0", "esbuild": "0.17.3", + "https-proxy-agent": "^7.0.5", "jose": "^5.2.4", "node-fetch": "^3.3.2", "obsidian": "latest", @@ -33,50 +33,6 @@ "node": ">=0.10.0" } }, - "node_modules/@anthropic-ai/sdk": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.25.1.tgz", - "integrity": "sha512-+qF6hRax1XzpbZTy1YqQMTwOPY3W1B5PFS5ZXgmPl1V/lDqXm2uYFCpnSR3DREz1FRZ4lflzbK1cAhr88FvCpw==", - "dev": true, - "dependencies": { - "@types/node": "^18.11.18", - "@types/node-fetch": "^2.6.4", - "abort-controller": "^3.0.0", - "agentkeepalive": "^4.2.1", - "form-data-encoder": "1.7.2", - "formdata-node": "^4.3.2", - "node-fetch": "^2.6.7" - } - }, - "node_modules/@anthropic-ai/sdk/node_modules/@types/node": { - "version": "18.19.44", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.44.tgz", - "integrity": "sha512-ZsbGerYg72WMXUIE9fYxtvfzLEuq6q8mKERdWFnqTmOvudMxnz+CBNRoOwJ2kNpFOncrKjT1hZwxjlFgQ9qvQA==", - "dev": true, - "dependencies": { - "undici-types": "~5.26.4" - } - }, - "node_modules/@anthropic-ai/sdk/node_modules/node-fetch": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", - "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", - "dev": true, - "dependencies": { - "whatwg-url": "^5.0.0" - }, - "engines": { - "node": "4.x || >=6.0.0" - }, - "peerDependencies": { - "encoding": "^0.1.0" - }, - "peerDependenciesMeta": { - "encoding": { - "optional": true - } - } - }, "node_modules/@codemirror/state": { "version": "6.4.1", "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.4.1.tgz", @@ -885,6 +841,18 @@ "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, + "node_modules/agent-base": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.1.tgz", + "integrity": "sha512-H0TSyFNDMomMNJQBn8wFV5YC/2eJ+VXECwOadZJT554xP6cODZHPX3H9QMQECxvrgiSOP1pHjy1sMWQVYJOUOA==", + "dev": true, + "dependencies": { + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, "node_modules/agentkeepalive": { "version": "4.5.0", "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.5.0.tgz", @@ -1671,6 +1639,19 @@ "node": ">=8" } }, + "node_modules/https-proxy-agent": { + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.5.tgz", + 
"integrity": "sha512-1e4Wqeblerz+tMKPIq2EMGiiWW1dIjZOksyHWSUm1rmuvw/how9hBHZ38lAGj5ID4Ik6EdkOw7NmWPy6LAwalw==", + "dev": true, + "dependencies": { + "agent-base": "^7.0.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, "node_modules/humanize-ms": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", diff --git a/package.json b/package.json index 5fa5223..e74b109 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "obsidian-tars", - "version": "0.3.1", + "version": "0.4.0", "description": "Use Kimi and other Chinese LLMs for text generation based on tag suggestions.", "main": "main.js", "scripts": { @@ -12,12 +12,12 @@ "author": "C Jack", "license": "MIT", "devDependencies": { - "@anthropic-ai/sdk": "^0.25.1", "@types/node": "^16.18.101", "@typescript-eslint/eslint-plugin": "^6.19.1", "@typescript-eslint/parser": "^6.19.1", "builtin-modules": "3.3.0", "esbuild": "0.17.3", + "https-proxy-agent": "^7.0.5", "jose": "^5.2.4", "node-fetch": "^3.3.2", "obsidian": "latest", diff --git a/src/lang/locale/en.ts b/src/lang/locale/en.ts index 0d95204..f636a45 100644 --- a/src/lang/locale/en.ts +++ b/src/lang/locale/en.ts @@ -48,9 +48,13 @@ export default { Model: 'Model', 'Select the model to use': 'Select the model to use', 'Input the model to use': 'Input the model to use', + 'Please enter a number': 'Please enter a number', + 'Minimum value is 256': 'Minimum value is 256', + 'Proxy URL': 'Proxy URL', + 'Invalid URL': 'Invalid URL', 'Override input parameters': 'Override input parameters', - 'Developer feature, in JSON format, for example, {"model": "gptX"} can override the model input parameter.': - 'Developer feature, in JSON format, for example, {"model": "gptX"} can override the model input parameter.', + 'Developer feature, in JSON format. e.g. {"model": "your model", "baseURL": "your url"}': + 'Developer feature, in JSON format. e.g. {"model": "your model", "baseURL": "your url"}', 'Remove AI assistant': 'Remove AI assistant', Remove: 'Remove', diff --git a/src/lang/locale/zh-cn.ts b/src/lang/locale/zh-cn.ts index 20b171b..52e2b2e 100644 --- a/src/lang/locale/zh-cn.ts +++ b/src/lang/locale/zh-cn.ts @@ -47,9 +47,13 @@ export default { Model: '模型', 'Select the model to use': '选择要使用的模型', 'Input the model to use': '输入要使用的模型', + 'Please enter a number': '请输入一个数字', + 'Minimum value is 256': '最小值是256', + 'Proxy URL': '代理 URL', + 'Invalid URL': '无效的 URL', 'Override input parameters': '覆盖输入参数', - 'Developer feature, in JSON format, for example, {"model": "gptX"} can override the model input parameter.': - '开发者功能,json格式, 比如{"model": "gptX"}可以覆盖model输入参数,如果model下拉框没有对应的模型,想要使用新的模型,可以在这里输入', + 'Developer feature, in JSON format. e.g. {"model": "your model", "baseURL": "your url"}': + '开发者功能,以 JSON 格式。例如 {"model": "你想要的model", "baseURL": "自定义地址"}', 'Remove AI assistant': '移除 AI 助手', Remove: '移除', diff --git a/src/providers/claude.ts b/src/providers/claude.ts index 23e5b1c..155db69 100644 --- a/src/providers/claude.ts +++ b/src/providers/claude.ts @@ -1,41 +1,71 @@ -import Anthropic from '@anthropic-ai/sdk' - +import { HttpsProxyAgent } from 'https-proxy-agent' import { t } from 'src/lang/helper' -import { BaseOptions, Message, SendRequest, Vendor } from '.' +import fetch from 'node-fetch' +import { BaseOptions, Message, SendRequest, Vendor, Optional } from '.' 
-interface ClaudeMessage {
-	role: 'user' | 'assistant'
-	content: string
-}
+type ClaudeOptions = BaseOptions & Pick<Optional, 'max_tokens' | 'proxyUrl'>

-const sendRequestFunc = (settings: BaseOptions): SendRequest =>
-	async function* (rawMessages: Message[]) {
+const sendRequestFunc = (settings: ClaudeOptions): SendRequest =>
+	async function* (messages: Message[]) {
 		const { parameters, ...optionsExcludingParams } = settings
 		const options = { ...optionsExcludingParams, ...parameters } // 这样的设计,让parameters 可以覆盖掉前面的设置 optionsExcludingParams
-		const { apiKey, baseURL, model, ...remains } = options
+		const { apiKey, baseURL, model, max_tokens, proxyUrl, ...remains } = options
 		if (!apiKey) throw new Error(t('API key is required'))

-		const messages = rawMessages.filter((m) => m.role === 'user' || m.role == 'assistant') as ClaudeMessage[]
-		const client = new Anthropic({
-			apiKey,
-			baseURL
-		})
-
-		const stream = client.messages.stream({
+		const [system_msg, messagesWithoutSys] =
+			messages[0].role === 'system' ? [messages[0], messages.slice(1)] : [null, messages]
+		const headers = {
+			'Content-Type': 'application/json',
+			'anthropic-version': '2023-06-01',
+			'X-Api-Key': apiKey
+		}
+		const body = {
 			model,
-			messages,
-			max_tokens: 1024
-			// ...remains
+			system: system_msg?.content,
+			max_tokens,
+			messages: messagesWithoutSys,
+			stream: true,
+			...remains
+		}
+		console.debug('proxyUrl', proxyUrl)
+		console.debug('claude api body', JSON.stringify(body))
+		const response = await fetch(baseURL, {
+			method: 'POST',
+			headers,
+			body: JSON.stringify(body),
+			agent: proxyUrl ? new HttpsProxyAgent(proxyUrl) : undefined
 		})

-		// for await (const part of stream) {
-		// 	const text = part.choices[0]?.delta?.content
-		// 	if (!text) continue
-		// 	yield text
-		// }
-		for await (const event of stream) {
-			console.log('event', event)
-			yield 'todo'
+		if (!response || !response.body) {
+			throw new Error('No response')
+		}
+
+		const decoder = new TextDecoder('utf-8')
+		// see https://docs.anthropic.com/en/api/messages-streaming
+		for await (const chunk of response.body) {
+			const lines = decoder.decode(Buffer.from(chunk))
+			// console.debug('lines', lines)
+			const [firstLine, secondLine, _] = lines.split('\n')
+			if (!firstLine.startsWith('event: ')) {
+				// {"type":"error","error":{"type":"invalid_request_error","message":"max_tokens: 8192 > 4096, which is the maximum allowed number of output tokens for claude-3-opus-20240229"}}
+				throw new Error(lines)
+			}
+
+			const event = firstLine.slice('event: '.length).trim()
+			const dataStr = secondLine.slice('data:'.length)
+			const data = JSON.parse(dataStr)
+			switch (event) {
+				case 'content_block_delta':
+					yield data.delta.text
+					break
+				case 'message_delta':
+					if (data.delta.stop_reason !== 'end_turn') {
+						throw new Error(`Unexpected stop reason: ${data.delta.stop_reason}`)
+					}
+					break
+				case 'error':
+					throw new Error(data.error.message)
+			}
 		}
 	}

@@ -45,11 +75,13 @@ export const claudeVendor: Vendor = {
 	name: 'Claude',
 	defaultOptions: {
 		apiKey: '',
-		baseURL: 'https://fast.bemore.lol',
+		baseURL: 'https://api.anthropic.com/v1/messages',
 		model: models[0],
+		max_tokens: 1024,
+		proxyUrl: '',
 		parameters: {}
-	},
+	} as ClaudeOptions,
 	sendRequestFunc,
 	models,
-	websiteToObtainKey: ''
+	websiteToObtainKey: 'https://console.anthropic.com'
 }
diff --git a/src/providers/index.ts b/src/providers/index.ts
index 6f77f28..73e2121 100644
--- a/src/providers/index.ts
+++ b/src/providers/index.ts
@@ -25,6 +25,8 @@ export interface ProviderSettings {
 	options: BaseOptions
 }

-export interface SecretOptions extends BaseOptions {
+export interface Optional {
 	apiSecret: string
+	proxyUrl: string
+	max_tokens: number
 }
diff --git a/src/providers/qianFan.ts b/src/providers/qianFan.ts
index 4f528eb..f3b1fce 100644
--- a/src/providers/qianFan.ts
+++ b/src/providers/qianFan.ts
@@ -1,6 +1,6 @@
 import fetch from 'node-fetch'
 import { t } from 'src/lang/helper'
-import { Message, SecretOptions, SendRequest, Vendor } from '.'
+import { Message, SendRequest, Vendor, BaseOptions, Optional } from '.'

 interface TokenResponse {
 	access_token: string
@@ -14,9 +14,7 @@ interface Token {
 	apiSecret: string
 }

-export interface QianFanOptions extends SecretOptions {
-	token?: Token
-}
+type QianFanOptions = BaseOptions & Pick<Optional, 'apiSecret'> & { token?: Token }

 const createToken = async (apiKey: string, apiSecret: string) => {
 	if (!apiKey || !apiSecret) throw new Error('Invalid API key secret')
@@ -96,7 +94,7 @@ const sendRequestFunc = (settings: QianFanOptions): SendRequest =>
 		const data = {
 			messages,
 			stream: true,
-			remains
+			...remains
 		}
 		const response = await fetch(baseURL + `/${model}?access_token=${token.accessToken}`, {
 			method: 'POST',
@@ -147,7 +145,7 @@ export const qianFanVendor: Vendor = {
 		baseURL: 'https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat',
 		model: models[0],
 		parameters: {}
-	} as SecretOptions,
+	} as QianFanOptions,
 	sendRequestFunc,
 	models: models,
 	websiteToObtainKey: 'https://qianfan.cloud.baidu.com'
diff --git a/src/providers/qwen.ts b/src/providers/qwen.ts
index bcea0a0..2a9154a 100644
--- a/src/providers/qwen.ts
+++ b/src/providers/qwen.ts
@@ -17,7 +17,7 @@ const sendRequestFunc = (settings: BaseOptions): SendRequest =>
 			parameters: {
 				incremental_output: 'true'
 			},
-			remains
+			...remains
 		}
 		const response = await fetch(baseURL, {
 			method: 'POST',
@@ -53,15 +53,7 @@ const sendRequestFunc = (settings: BaseOptions): SendRequest =>
 		}
 	}

-const models = [
-	'qwen-turbo',
-	'qwen-plus',
-	'qwen-max',
-	'qwen-max-0428',
-	'qwen-max-0403',
-	'qwen-max-0107',
-	'qwen-max-longcontext'
-]
+const models = ['qwen-turbo', 'qwen-plus', 'qwen-max', 'qwen-max-longcontext']

 export const qwenVendor: Vendor = {
 	name: 'Qwen',
diff --git a/src/settingTab.ts b/src/settingTab.ts
index 4d5c151..445df48 100644
--- a/src/settingTab.ts
+++ b/src/settingTab.ts
@@ -1,7 +1,7 @@
 import { App, Notice, PluginSettingTab, Setting } from 'obsidian'
 import { t } from './lang/helper'
 import TarsPlugin from './main'
-import { BaseOptions, ProviderSettings, SecretOptions } from './providers'
+import { BaseOptions, ProviderSettings, Optional } from './providers'
 import { DEFAULT_SETTINGS, availableVendors } from './settings'

 export class TarsSettingTab extends PluginSettingTab {
@@ -116,14 +116,20 @@ export class TarsSettingTab extends PluginSettingTab {
 				vendor.websiteToObtainKey ? t('Obtain key from ') + vendor.websiteToObtainKey : ''
 			)

-		if ('apiSecret' in settings.options) {
-			this.addAPISecretSection(details, settings.options as SecretOptions)
-		}
+		if ('apiSecret' in settings.options)
+			this.addAPISecretOptional(details, settings.options as BaseOptions & Pick<Optional, 'apiSecret'>)
+
 		if (vendor.models.length > 0) {
 			this.addModelDropDownSection(details, settings.options, vendor.models)
 		} else {
 			this.addModelTextSection(details, settings.options)
 		}
+		if ('max_tokens' in settings.options)
+			this.addMaxTokensOptional(details, settings.options as BaseOptions & Pick<Optional, 'max_tokens'>)
+
+		if ('proxyUrl' in settings.options)
+			this.addProxyUrlOptional(details, settings.options as BaseOptions & Pick<Optional, 'proxyUrl'>)
+
 		this.addParametersSection(details, settings.options)

 		new Setting(details).setName(t('Remove') + ' ' + vendor.name).addButton((btn) => {
@@ -178,7 +184,11 @@
 			})
 		)

-	addAPISecretSection = (details: HTMLDetailsElement, options: SecretOptions, desc: string = '') =>
+	addAPISecretOptional = (
+		details: HTMLDetailsElement,
+		options: BaseOptions & Pick<Optional, 'apiSecret'>,
+		desc: string = ''
+	) =>
 		new Setting(details)
 			.setName('API Secret')
 			.setDesc(desc)
@@ -225,12 +235,54 @@
 			})
 		)

+	addMaxTokensOptional = (details: HTMLDetailsElement, options: BaseOptions & Pick<Optional, 'max_tokens'>) =>
+		new Setting(details).setName('Max tokens').addText((text) =>
+			text
+				.setPlaceholder('')
+				.setValue(options.max_tokens.toString())
+				.onChange(async (value) => {
+					const number = parseInt(value)
+					if (isNaN(number)) {
+						new Notice(t('Please enter a number'))
+						return
+					}
+					if (number < 256) {
+						new Notice(t('Minimum value is 256'))
+						return
+					}
+					options.max_tokens = number
+					await this.plugin.saveSettings()
+				})
+		)
+
+	addProxyUrlOptional = (details: HTMLDetailsElement, options: BaseOptions & Pick<Optional, 'proxyUrl'>) =>
+		new Setting(details)
+			.setName(t('Proxy URL'))
+			.setDesc('e.g. http://127.0.0.1:7890')
+			.addText((text) =>
+				text
+					.setPlaceholder('')
+					.setValue(options.proxyUrl)
+					.onChange(async (value) => {
+						const url = value.trim()
+						if (url.length === 0) {
+							// an empty string is valid and clears proxyUrl
+							options.proxyUrl = ''
+							await this.plugin.saveSettings()
+						} else if (!isValidUrl(url)) {
+							new Notice(t('Invalid URL'))
+							return
+						} else {
+							options.proxyUrl = url
+							await this.plugin.saveSettings()
+						}
+					})
+			)
+
 	addParametersSection = (details: HTMLDetailsElement, options: BaseOptions) =>
 		new Setting(details)
 			.setName(t('Override input parameters'))
-			.setDesc(
-				t('Developer feature, in JSON format, for example, {"model": "gptX"} can override the model input parameter.')
-			)
+			.setDesc(t('Developer feature, in JSON format. e.g. {"model": "your model", "baseURL": "your url"}'))
 			.addTextArea((text) =>
 				text
 					.setPlaceholder('{}')
@@ -269,3 +321,12 @@ const validateTagList = (tags: string[]) => {
 	}
 	return true
 }
+
+const isValidUrl = (url: string) => {
+	try {
+		new URL(url)
+		return true
+	} catch (e) {
+		return false
+	}
+}
diff --git a/versions.json b/versions.json
index 4b50600..7781895 100644
--- a/versions.json
+++ b/versions.json
@@ -5,5 +5,6 @@
 	"0.2.0": "1.5.8",
 	"0.2.1": "1.5.8",
 	"0.3.0": "1.5.8",
-	"0.3.1": "1.5.8"
+	"0.3.1": "1.5.8",
+	"0.4.0": "1.5.8"
 }
\ No newline at end of file
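Taken together, the new `proxyUrl` setting and the `https-proxy-agent` dependency replace the removed `@anthropic-ai/sdk` client with a plain `node-fetch` request that can be tunnelled through an HTTP proxy. A minimal sketch of the pattern the Claude provider now uses (the helper name and placeholder body below are illustrative, not part of the plugin's API):

```typescript
import fetch from 'node-fetch'
import { HttpsProxyAgent } from 'https-proxy-agent'

// Sketch: POST a JSON body, optionally routing the request through the configured proxy.
const postViaOptionalProxy = async (baseURL: string, body: unknown, proxyUrl?: string) => {
	// An empty or missing proxyUrl means a direct connection, matching the settings default.
	const agent = proxyUrl ? new HttpsProxyAgent(proxyUrl) : undefined
	return fetch(baseURL, {
		method: 'POST',
		headers: { 'Content-Type': 'application/json' },
		body: JSON.stringify(body),
		agent
	})
}

// Usage sketch:
// postViaOptionalProxy('https://api.anthropic.com/v1/messages', { model: 'claude-3-...' }, 'http://127.0.0.1:7890')
```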