diff --git a/.env.local.demo b/.env.local.demo
index 7499573..ebce274 100644
--- a/.env.local.demo
+++ b/.env.local.demo
@@ -44,4 +44,7 @@ GOOGLE_SEARCH_ENGINE_ID=
GOOGLE_SEARCH_API_KEY=
# SERPER API KEY
-SERPER_API_KEY=
\ No newline at end of file
+SERPER_API_KEY=
+
+# OpenRouter API KEY
+NEXT_PUBLIC_OPENROUTER_API_KEY=
\ No newline at end of file
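The new variable is read server-side in `src/app/api/openRouter/route.ts` (added later in this diff). As with the OpenAI route, a key supplied by the client in the `Authorization` header takes precedence over the env-configured one; a minimal sketch of that fallback order:

```ts
// Resolution order used by the new route: client-supplied key, then env key,
// then empty (an empty result is rejected with error 10002).
const Authorization =
  headerApiKey || process.env.NEXT_PUBLIC_OPENROUTER_API_KEY || "";
```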
diff --git a/CHANGE_LOG.md b/CHANGE_LOG.md
index d661e35..55b7999 100755
--- a/CHANGE_LOG.md
+++ b/CHANGE_LOG.md
@@ -2,23 +2,27 @@
## v0.8.3
-> 2023-08-12
+> 2023-08-13
### Fixed
- Fixed mobile session content obscuring the bottom input box
- Refactored function calling invocation logic and fixed bugs
+- Fixed the "drift" of the drop-down selection box when selecting a new session model
### Add
- Added function calling support
- Added plugin system
- Added support for Google search, which can call the Google API to search and return results when encountering problems that exceed the AI model training date
+- Introduced OpenRouter to support Claude, PaLM 2, Llama 2, and other models
### Changed
- Adjusted the text input box for editing chat content to Textarea
- Replaced Google search with [Serper API](https://serper.dev/), which is easier to configure
+- All models now use OpenAI gpt-3.5-turbo to generate conversation titles, reducing token consumption
+- Plugins are hidden when using OpenRouter models, since those models do not support plugins yet
## v0.8.2
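The "conversation titles" entry is implemented in `src/hooks/useChannel/index.tsx` later in this diff: whichever provider a channel uses, the title request is rewritten to gpt-3.5-turbo and posted to `/api/openai` with the user's stored OpenAI key. Condensed sketch of that change:

```ts
// getChannelName now forces the cheaper model for title generation:
newParams.model = "gpt-3.5-turbo";
newParams.modelLabel = "gpt-3.5-turbo";
fetch("/api/openai", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: useOpenAIStore.getState().openai.apiKey,
  },
  body: JSON.stringify(newParams),
});
```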
diff --git a/CHANGE_LOG.zh_CN.md b/CHANGE_LOG.zh_CN.md
index 6d1e350..88e9299 100755
--- a/CHANGE_LOG.zh_CN.md
+++ b/CHANGE_LOG.zh_CN.md
@@ -2,23 +2,27 @@
## v0.8.3
-> 2023-08-12
+> 2023-08-13
### 修复
- 修复移动端会话内容遮挡底部输入框的问题
- 重构 function calling 的调用逻辑,修复 bug
+- 修复新会话选择模型时,下拉选择框出现“漂移”的情况
### 新增
- 新增 function calling 支持.
- 新增插件系统
- 新增支持谷歌搜索,在遇到超出 AI 模型训练日期的问题时能够调用谷歌 api 进行搜索并返回结果
+- 引入 OpenRouter,支持 Claude、PaLM2、Llama 2 等模型
### 调整
- 调整编辑聊天内容的文本输入框为 Textarea
- 将谷歌搜索 由官方 API 更换为 [Serper API](https://serper.dev/),配置更方便
+- 各个模型在获取会话标题时统一使用 openai gpt-3.5-turbo,节省 token 消耗
+- 在使用 OpenRouter 提供的模型时,隐藏插件,因为他们暂不支持插件
## v0.8.2
diff --git a/package.json b/package.json
index 8dca62c..ba65681 100755
--- a/package.json
+++ b/package.json
@@ -21,7 +21,7 @@
"@react-email/components": "0.0.7",
"@react-email/render": "0.0.7",
"@svgr/webpack": "8.0.1",
- "@types/node": "20.4.9",
+ "@types/node": "20.4.10",
"@types/react": "18.2.20",
"@types/react-dom": "18.2.7",
"@upstash/redis": "1.22.0",
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 866277d..6a29e24 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -27,8 +27,8 @@ dependencies:
specifier: 8.0.1
version: 8.0.1
'@types/node':
- specifier: 20.4.9
- version: 20.4.9
+ specifier: 20.4.10
+ version: 20.4.10
'@types/react':
specifier: 18.2.20
version: 18.2.20
@@ -3335,13 +3335,13 @@ packages:
resolution: {integrity: sha512-iiUgKzV9AuaEkZqkOLDIvlQiL6ltuZd9tGcW3gwpnX8JbuiuhFlEGmmFXEXkN50Cvq7Os88IY2v0dkDqXYWVgA==}
dev: false
- /@types/node@20.4.9:
- resolution: {integrity: sha512-8e2HYcg7ohnTUbHk8focoklEQYvemQmu9M/f43DZVx43kHn0tE3BY/6gSDxS7k0SprtS0NHvj+L80cGLnoOUcQ==}
+ /@types/node@20.4.10:
+ resolution: {integrity: sha512-vwzFiiy8Rn6E0MtA13/Cxxgpan/N6UeNYR9oUu6kuJWxu6zCk98trcDp8CBhbtaeuq9SykCmXkFr2lWLoPcvLg==}
/@types/nodemailer@6.4.9:
resolution: {integrity: sha512-XYG8Gv+sHjaOtUpiuytahMy2mM3rectgroNbs6R3djZEKmPNiIJwe9KqOJBGzKKnNZNKvnuvmugBgpq3w/S0ig==}
dependencies:
- '@types/node': 20.4.9
+ '@types/node': 20.4.10
dev: true
/@types/parse-json@4.0.0:
diff --git a/public/claude.webp b/public/claude.webp
new file mode 100644
index 0000000..f78bda7
Binary files /dev/null and b/public/claude.webp differ
diff --git a/public/palm.webp b/public/palm.webp
new file mode 100644
index 0000000..1e170b3
Binary files /dev/null and b/public/palm.webp differ
diff --git a/src/app/api/azure/function_call.ts b/src/app/api/azure/function_call.ts
index 5869522..eeb18b9 100644
--- a/src/app/api/azure/function_call.ts
+++ b/src/app/api/azure/function_call.ts
@@ -53,7 +53,7 @@ export const function_call = async ({
}: IFunctionCall & { plugins: fn_call[] }) => {
try {
const temperature = isUndefined(p_temperature) ? 1 : p_temperature;
- const max_tokens = isUndefined(p_max_tokens) ? 2000 : p_max_tokens;
+ const max_tokens = isUndefined(p_max_tokens) ? 1000 : p_max_tokens;
const response = await fetchAzureOpenAI({
fetchURL,
diff --git a/src/app/api/azure/regular.ts b/src/app/api/azure/regular.ts
index 805b6ca..9631c49 100644
--- a/src/app/api/azure/regular.ts
+++ b/src/app/api/azure/regular.ts
@@ -29,7 +29,7 @@ const fetchAzureOpenAI = async ({
presence_penalty: 0,
stream: true,
temperature: isUndefined(temperature) ? 1 : temperature,
- max_tokens: isUndefined(max_tokens) ? 2000 : max_tokens,
+ max_tokens: isUndefined(max_tokens) ? 1000 : max_tokens,
messages,
stop: null,
}),
diff --git a/src/app/api/openRouter/regular.ts b/src/app/api/openRouter/regular.ts
new file mode 100644
index 0000000..c4ff752
--- /dev/null
+++ b/src/app/api/openRouter/regular.ts
@@ -0,0 +1,84 @@
+import { ResErr, isUndefined } from "@/lib";
+import { stream } from "@/lib/stream";
+import type { supportModelType } from "@/lib/calcTokens/gpt-tokens";
+import type { IFetchOpenRouter } from "./types";
+
+interface IRegular extends IFetchOpenRouter {
+ prompt?: string;
+ modelLabel: supportModelType;
+ userId?: string;
+ headerApiKey?: string;
+}
+
+const fetchOpenRouter = async ({
+ fetchURL,
+ Authorization,
+ model,
+ temperature,
+ max_tokens,
+ messages,
+}: IFetchOpenRouter) => {
+ return await fetch(fetchURL, {
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${Authorization}`,
+ "HTTP-Referer": "https://chat.ltopx.com",
+ "X-Title": "L-GPT",
+ },
+ method: "POST",
+ body: JSON.stringify({
+ stream: true,
+ model,
+ temperature: isUndefined(temperature) ? 1 : temperature,
+ max_tokens: isUndefined(max_tokens) ? 1000 : max_tokens,
+ messages,
+ }),
+ });
+};
+
+export const regular = async ({
+ prompt,
+ messages,
+ fetchURL,
+ Authorization,
+ model,
+ modelLabel,
+ temperature,
+ max_tokens,
+ userId,
+ headerApiKey,
+}: IRegular) => {
+ if (prompt) messages.unshift({ role: "system", content: prompt });
+
+ try {
+ const response = await fetchOpenRouter({
+ fetchURL,
+ Authorization,
+ model,
+ temperature,
+ max_tokens,
+ messages,
+ });
+
+ if (response.status !== 200) {
+ return new Response(response.body, { status: 500 });
+ }
+
+ const { readable, writable } = new TransformStream();
+
+ stream({
+ readable: response.body as ReadableStream,
+ writable,
+ userId,
+ headerApiKey,
+ messages,
+ model,
+ modelLabel,
+ });
+
+ return new Response(readable, response);
+ } catch (error: any) {
+ console.log(error, "openrouter regular error");
+ return ResErr({ msg: error?.message || "Error" });
+ }
+};
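`regular` pipes the upstream body through `stream()` into a `TransformStream` and returns the readable side, so callers consume the route like any streamed `fetch` response. A client-side sketch, where `params` matches the payload shape shown in `route.ts` below and `renderChunk` is a hypothetical UI callback:

```ts
const res = await fetch("/api/openRouter", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify(params),
});
const reader = (res.body as ReadableStream<Uint8Array>).getReader();
const decoder = new TextDecoder();
for (;;) {
  const { value, done } = await reader.read();
  if (done) break;
  renderChunk(decoder.decode(value, { stream: true }));
}
```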
diff --git a/src/app/api/openRouter/route.ts b/src/app/api/openRouter/route.ts
new file mode 100644
index 0000000..9762438
--- /dev/null
+++ b/src/app/api/openRouter/route.ts
@@ -0,0 +1,75 @@
+import { headers } from "next/headers";
+import { getServerSession } from "next-auth/next";
+import { authOptions } from "@/utils/plugin/auth";
+import { prisma } from "@/lib/prisma";
+import { ResErr } from "@/lib";
+import { PREMIUM_MODELS } from "@/hooks/useLLM";
+import { regular } from "./regular";
+
+export async function POST(request: Request) {
+ const session = await getServerSession(authOptions);
+ const headersList = headers();
+ const headerApiKey = headersList.get("Authorization") || "";
+ const API_KEY = process.env.NEXT_PUBLIC_OPENROUTER_API_KEY;
+
+ const {
+ // model: the model id forwarded in the request to OpenRouter
+ model,
+ // modelLabel: the display name used for token accounting
+ modelLabel,
+ temperature,
+ max_tokens,
+ prompt,
+ chat_list,
+ } = await request.json();
+
+ /**
+ * If not logged in, only the locally configured API Key can be used.
+ */
+ if (!session && !headerApiKey) return ResErr({ error: 10001 });
+
+ if (!headerApiKey) {
+ const user = await prisma.user.findUnique({
+ where: { id: session?.user.id },
+ });
+ if (!user) return ResErr({ error: 20002 });
+
+ // audit user license
+ if (
+ user.license_type !== "premium" &&
+ user.license_type !== "team" &&
+ PREMIUM_MODELS.includes(modelLabel)
+ ) {
+ return ResErr({ error: 20009 });
+ }
+
+ const { availableTokens } = user;
+ if (availableTokens <= 0) return ResErr({ error: 10005 });
+ }
+
+ // first use local
+ // then use env configuration
+ // or empty
+ const Authorization = headerApiKey || API_KEY || "";
+
+ if (!Authorization) return ResErr({ error: 10002 });
+
+ const fetchURL = "https://openrouter.ai/api/v1/chat/completions";
+
+ const messages = [...chat_list];
+
+ const userId = session?.user.id;
+
+ return await regular({
+ prompt,
+ messages,
+ fetchURL,
+ Authorization,
+ model,
+ modelLabel,
+ temperature,
+ max_tokens,
+ userId,
+ headerApiKey,
+ });
+}
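The route expects both `model` (the id forwarded to OpenRouter verbatim) and `modelLabel` (the display name checked against `PREMIUM_MODELS` and used for token accounting). An illustrative payload, using values from the `useLLM` store changes below:

```ts
const params = {
  model: "anthropic/claude-2", // OpenRouter model id
  modelLabel: "Claude v2",     // premium check + calcTokens mapping
  temperature: 1,              // store defaults, see useOpenAI changes below
  max_tokens: 1000,
  prompt: "You are a helpful assistant.",
  chat_list: [{ role: "user", content: "Hello" }],
};
```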
diff --git a/src/app/api/openRouter/types.ts b/src/app/api/openRouter/types.ts
new file mode 100644
index 0000000..277a7b3
--- /dev/null
+++ b/src/app/api/openRouter/types.ts
@@ -0,0 +1,8 @@
+export interface IFetchOpenRouter {
+ messages: any[];
+ fetchURL: string;
+ Authorization: string;
+ model: string;
+ temperature?: number;
+ max_tokens?: number;
+}
diff --git a/src/app/api/openai/function_call.ts b/src/app/api/openai/function_call.ts
index 4e40dbd..3355244 100644
--- a/src/app/api/openai/function_call.ts
+++ b/src/app/api/openai/function_call.ts
@@ -53,7 +53,7 @@ export const function_call = async ({
}: IFunctionCall) => {
try {
const temperature = isUndefined(p_temperature) ? 1 : p_temperature;
- const max_tokens = isUndefined(p_max_tokens) ? 2000 : p_max_tokens;
+ const max_tokens = isUndefined(p_max_tokens) ? 1000 : p_max_tokens;
const response = await fetchOpenAI({
fetchURL,
diff --git a/src/app/api/openai/regular.ts b/src/app/api/openai/regular.ts
index 6ce4125..f86ee2b 100644
--- a/src/app/api/openai/regular.ts
+++ b/src/app/api/openai/regular.ts
@@ -28,7 +28,7 @@ const fetchOpenAI = async ({
stream: true,
model,
temperature: isUndefined(temperature) ? 1 : temperature,
- max_tokens: isUndefined(max_tokens) ? 2000 : max_tokens,
+ max_tokens: isUndefined(max_tokens) ? 1000 : max_tokens,
messages,
}),
});
diff --git a/src/app/api/openai/route.ts b/src/app/api/openai/route.ts
index 4ce94f6..bdd1802 100644
--- a/src/app/api/openai/route.ts
+++ b/src/app/api/openai/route.ts
@@ -19,7 +19,7 @@ export async function POST(request: Request) {
const session = await getServerSession(authOptions);
const headersList = headers();
const headerApiKey = headersList.get("Authorization") || "";
- const NEXT_PUBLIC_OPENAI_API_KEY = process.env.NEXT_PUBLIC_OPENAI_API_KEY;
+ const API_KEY = process.env.NEXT_PUBLIC_OPENAI_API_KEY;
const {
// model 用于接口发送给 OpenAI 或者其他大语言模型的请求参数
@@ -61,7 +61,7 @@ export async function POST(request: Request) {
// first use local
// then use env configuration
// or empty
- const Authorization = headerApiKey || NEXT_PUBLIC_OPENAI_API_KEY || "";
+ const Authorization = headerApiKey || API_KEY || "";
if (!Authorization) return ResErr({ error: 10002 });
diff --git a/src/components/chatSection/chatConfigure/index.tsx b/src/components/chatSection/chatConfigure/index.tsx
index 8443a68..044a712 100644
--- a/src/components/chatSection/chatConfigure/index.tsx
+++ b/src/components/chatSection/chatConfigure/index.tsx
@@ -36,7 +36,10 @@ const renderLabel = (item: any) => {
const renderModelLabel = (item: any) => {
return (
-
{item.label}
+
+ {!!item.icon && {item.icon}}
+ {item.label}
+
{!!item.premium && (
{
"dark:border-orange-500 dark:text-orange-500 dark:bg-orange-50/90"
)}
>
- PREMIUM
+ PRO
)}
@@ -57,9 +60,17 @@ export default function ChatConfigure({ list, channel }: ChatConfigureProps) {
const tCommon = useTranslations("common");
const [isShow, setIsShow] = React.useState(true);
+ const [isAnimation, setIsAnimation] = React.useState(false);
- const [openai, azure] = useLLMStore((state) => [state.openai, state.azure]);
- const LLMOptions = React.useMemo(() => [openai, azure], [openai, azure]);
+ const [openai, azure, openRouter] = useLLMStore((state) => [
+ state.openai,
+ state.azure,
+ state.openRouter,
+ ]);
+ const LLMOptions = React.useMemo(
+ () => [openai, azure, openRouter],
+ [openai, azure, openRouter]
+ );
const options =
LLMOptions.find((item) => item.value === channel.channel_model.type)
@@ -111,10 +122,12 @@ export default function ChatConfigure({ list, channel }: ChatConfigureProps) {
return (
setIsAnimation(true)}
+ onAnimationComplete={() => setIsAnimation(false)}
>
-
+ {channel.channel_model.type !== "openRouter" && (
+
+ )}
);
diff --git a/src/components/chatSection/chatFooter/handler/index.tsx b/src/components/chatSection/chatFooter/handler/index.tsx
index fa042dd..2caf7b1 100644
--- a/src/components/chatSection/chatFooter/handler/index.tsx
+++ b/src/components/chatSection/chatFooter/handler/index.tsx
@@ -1,4 +1,5 @@
import React from "react";
+import Image from "next/image";
import { useTranslations } from "next-intl";
import { Button, Confirm } from "@ltopx/lx-ui";
import toast from "react-hot-toast";
@@ -21,8 +22,15 @@ export default function Handler() {
state.activeId,
state.list,
]);
- const [openai, azure] = useLLMStore((state) => [state.openai, state.azure]);
- const LLMOptions = React.useMemo(() => [openai, azure], [openai, azure]);
+ const [openai, azure, openRouter] = useLLMStore((state) => [
+ state.openai,
+ state.azure,
+ state.openRouter,
+ ]);
+ const LLMOptions = React.useMemo(
+ () => [openai, azure, openRouter],
+ [openai, azure, openRouter]
+ );
const license_type = useUserInfoStore((state) => state.license_type);
const updateChatSettingOpen = useOpenStore(
@@ -68,6 +76,18 @@ export default function Handler() {
if (type === "azure") return ;
+ if (type === "openRouter") {
+ if (name.includes("google/palm")) {
+ return ;
+ }
+ if (name.includes("anthropic/claude")) {
+ return ;
+ }
+ if (name.includes("meta-llama")) {
+ return 🦙;
+ }
+ }
+
return null;
};
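Given the `next/image` import and the new `public/claude.webp` / `public/palm.webp` assets, the three branches presumably render the provider marks roughly as follows (a sketch; the exact elements and props are not shown above, and the same mapping recurs in `avatar.tsx` below):

```tsx
if (name.includes("google/palm"))
  return <Image src="/palm.webp" alt="PaLM" width={16} height={16} />;
if (name.includes("anthropic/claude"))
  return <Image src="/claude.webp" alt="Claude" width={16} height={16} />;
if (name.includes("meta-llama")) return <span>🦙</span>;
```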
diff --git a/src/components/chatSection/chatFooter/index.tsx b/src/components/chatSection/chatFooter/index.tsx
index 795a130..d6b586f 100644
--- a/src/components/chatSection/chatFooter/index.tsx
+++ b/src/components/chatSection/chatFooter/index.tsx
@@ -35,8 +35,15 @@ export default function ChatFooter() {
state.activeId,
state.list,
]);
- const [openai, azure] = useLLMStore((state) => [state.openai, state.azure]);
- const LLMOptions = React.useMemo(() => [openai, azure], [openai, azure]);
+ const [openai, azure, openRouter] = useLLMStore((state) => [
+ state.openai,
+ state.azure,
+ state.openRouter,
+ ]);
+ const LLMOptions = React.useMemo(
+ () => [openai, azure, openRouter],
+ [openai, azure, openRouter]
+ );
const license_type = useUserInfoStore((state) => state.license_type);
// ref
@@ -121,6 +128,7 @@ export default function ChatFooter() {
if (session.data) updateUserInfo(2000);
} catch (errRes: any) {
+ console.log(errRes, "errRes");
let errorMessage = "error";
if (errRes.error === 10001) {
return toast(
diff --git a/src/components/chatSection/chatList/avatar.tsx b/src/components/chatSection/chatList/avatar.tsx
index e7f835a..f09d907 100644
--- a/src/components/chatSection/chatList/avatar.tsx
+++ b/src/components/chatSection/chatList/avatar.tsx
@@ -39,6 +39,35 @@ function renderAssistantIcon(model?: ChannelModel) {
);
}
+ if (type === "openRouter") {
+ if (model.name.includes("anthropic")) {
+ return (
+
+
+
+ );
+ }
+ if (model.name.includes("google/palm")) {
+ return (
+
+
+
+ );
+ }
+ if (model.name.includes("meta-llama")) {
+ return (
+
+ 🦙
+
+ );
+ }
+ }
return null;
}
diff --git a/src/components/chatSection/chatSetting/form.tsx b/src/components/chatSection/chatSetting/form.tsx
index cb6f650..1413426 100644
--- a/src/components/chatSection/chatSetting/form.tsx
+++ b/src/components/chatSection/chatSetting/form.tsx
@@ -46,7 +46,10 @@ const renderLabel = (item: any) => {
const renderModelLabel = (item: any) => {
return (
-
{item.label}
+
+ {!!item.icon && {item.icon}}
+ {item.label}
+
{!!item.premium && (
{
"dark:border-orange-500 dark:text-orange-500 dark:bg-orange-50/90"
)}
>
- PREMIUM
+ PRO
)}
@@ -69,8 +72,15 @@ const ChatSettingForm = React.forwardRef(
const tPrompt = useTranslations("prompt");
const tPlugin = useTranslations("plugin");
- const [openai, azure] = useLLMStore((state) => [state.openai, state.azure]);
- const LLMOptions = React.useMemo(() => [openai, azure], [openai, azure]);
+ const [openai, azure, openRouter] = useLLMStore((state) => [
+ state.openai,
+ state.azure,
+ state.openRouter,
+ ]);
+ const LLMOptions = React.useMemo(
+ () => [openai, azure, openRouter],
+ [openai, azure, openRouter]
+ );
const [activeId, list] = useChannelStore((state) => [
state.activeId,
state.list,
diff --git a/src/components/configureModel/index.tsx b/src/components/configureModel/index.tsx
index 47904cf..a826784 100644
--- a/src/components/configureModel/index.tsx
+++ b/src/components/configureModel/index.tsx
@@ -3,6 +3,7 @@ import { Select, Divider } from "@ltopx/lx-ui";
import { useLLMStore } from "@/hooks/useLLM";
import OpenAI from "./openai";
import Azure from "./azure";
+import OpenRouter from "./openRouter";
const renderLabel = (item: any) => {
return (
@@ -15,9 +16,16 @@ const renderLabel = (item: any) => {
export default function ConfigureModel() {
const [model, setModel] = React.useState("");
- const [openai, azure] = useLLMStore((state) => [state.openai, state.azure]);
+ const [openai, azure, openRouter] = useLLMStore((state) => [
+ state.openai,
+ state.azure,
+ state.openRouter,
+ ]);
- const LLMOptions = React.useMemo(() => [openai, azure], [openai, azure]);
+ const LLMOptions = React.useMemo(
+ () => [openai, azure, openRouter],
+ [openai, azure, openRouter]
+ );
const findLLM = LLMOptions.find((item) => item.value === model);
@@ -38,6 +46,7 @@ export default function ConfigureModel() {
{findLLM?.ico_big}
{model === LLMOptions[0].value && <OpenAI />}
{model === LLMOptions[1].value && <Azure />}
+ {model === LLMOptions[2].value && <OpenRouter />}
>
);
}
diff --git a/src/components/configureModel/openRouter.tsx b/src/components/configureModel/openRouter.tsx
new file mode 100644
index 0000000..cee1204
--- /dev/null
+++ b/src/components/configureModel/openRouter.tsx
@@ -0,0 +1,96 @@
+import React from "react";
+import { useTranslations } from "next-intl";
+import { Input, Tooltip, Slider } from "@ltopx/lx-ui";
+import Icon from "@/components/icon";
+import { useOpenAIStore } from "@/hooks/useOpenAI";
+import type { OpenRouter } from "@/hooks/useOpenAI/types";
+import { cn } from "@/lib";
+
+export default function OpenRouter() {
+ const tSetting = useTranslations("setting");
+
+ const openRouter = useOpenAIStore((state) => state.openRouter);
+
+ const updateOpenRouter = useOpenAIStore((state) => state.updateOpenRouter);
+
+ const mapTemperature = (value: number) => {
+ if (value === 0) return tSetting("deterministic");
+ if (value === 0.5) return tSetting("neutral");
+ if (value === 1) return tSetting("random");
+ return "";
+ };
+
+ const onChange = (value: string | number, key: keyof OpenRouter) => {
+ updateOpenRouter({ ...openRouter, [key]: value });
+ };
+
+ return (
+
+
+
+
+ onChange(value, "apiKey")}
+ />
+
+
+
+
+ {tSetting("temperature")}
+
+
+
+
+
+
onChange(value, "temperature")}
+ />
+
+ {mapTemperature(openRouter.temperature)}
+
+
+
+
+
+ {tSetting("max-tokens")}
+
+
+
+
+
onChange(value, "max_tokens")}
+ />
+
+
+ );
+}
diff --git a/src/components/icon/index.tsx b/src/components/icon/index.tsx
index 0023faa..6fcd81f 100755
--- a/src/components/icon/index.tsx
+++ b/src/components/icon/index.tsx
@@ -79,10 +79,13 @@ import { Game_2_line } from "./game_2_line";
import { Star_fill } from "./star_fill";
import { Pencil_line } from "./pencil_line";
import { Plugin_2_line } from "./plugin_2_line";
+import { Open_Router } from "./open_router";
const Icon: React.FC = (props) => {
const { icon } = props;
+ if (icon === "open_router") return ;
+
if (icon === "plugin_2_line") return ;
if (icon === "pencil_line") return ;
diff --git a/src/components/icon/open_router.tsx b/src/components/icon/open_router.tsx
new file mode 100644
index 0000000..77ce016
--- /dev/null
+++ b/src/components/icon/open_router.tsx
@@ -0,0 +1,44 @@
+import type { IconProps } from "./types";
+
+export const Open_Router = ({
+ size = 16,
+ className,
+ style,
+ onClick,
+}: IconProps) => (
+
+);
diff --git a/src/components/icon/types.ts b/src/components/icon/types.ts
index 04ef6a8..95c6d2e 100755
--- a/src/components/icon/types.ts
+++ b/src/components/icon/types.ts
@@ -78,7 +78,8 @@ export type IconType =
| "game_2_line"
| "star_fill"
| "pencil_line"
- | "plugin_2_line";
+ | "plugin_2_line"
+ | "open_router";
export interface IconProps {
className?: string;
diff --git a/src/components/share/createChat.tsx b/src/components/share/createChat.tsx
index 38f67d4..9e59020 100755
--- a/src/components/share/createChat.tsx
+++ b/src/components/share/createChat.tsx
@@ -14,9 +14,15 @@ const CreateChat: React.FC = ({ content }) => {
const tShare = useTranslations("share");
const router = useRouter();
- const [openai, azure] = useLLMStore((state) => [state.openai, state.azure]);
-
- const LLMOptions = React.useMemo(() => [openai, azure], [openai, azure]);
+ const [openai, azure, openRouter] = useLLMStore((state) => [
+ state.openai,
+ state.azure,
+ state.openRouter,
+ ]);
+ const LLMOptions = React.useMemo(
+ () => [openai, azure, openRouter],
+ [openai, azure, openRouter]
+ );
const init = (content: any) => {
const localChannelList = localStorage.getItem("channelList");
diff --git a/src/components/site/logo.tsx b/src/components/site/logo.tsx
index 3403a3a..3237728 100755
--- a/src/components/site/logo.tsx
+++ b/src/components/site/logo.tsx
@@ -16,18 +16,26 @@ export default function Logo({
const router = useRouter();
const locale = useLocale();
+ const [isNew, setNew] = React.useState(false);
+
const onClick = () => {
if (disabled) return;
router.push("/");
};
const onCheckLog = () => {
+ localStorage.setItem("is_new_version", pkg.version);
const version = pkg.version.replace(/\./g, "");
const localePath = locale === "zh-CN" ? "zh-CN/" : "";
const url = `https://docs.ltopx.com/${localePath}change-log#v${version}`;
window.open(url);
};
+ React.useEffect(() => {
+ const is_new_version = localStorage.getItem("is_new_version");
+ if (pkg.version !== is_new_version) setNew(true);
+ }, []);
+
return (
{!!version && (
v{pkg.version}
+ {isNew && (
+
+ New
+
+ )}
)}
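The badge logic in short: opening the changelog stores the current version, and the badge shows whenever the running version differs from the stored one.

```ts
// "New" stays visible until the user opens the changelog for this version.
const seen = localStorage.getItem("is_new_version");
const showNewBadge = pkg.version !== seen;
```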
diff --git a/src/hooks/useChannel/index.tsx b/src/hooks/useChannel/index.tsx
index 4efaaa0..b50c92c 100755
--- a/src/hooks/useChannel/index.tsx
+++ b/src/hooks/useChannel/index.tsx
@@ -327,6 +327,7 @@ export const useChannelStore = createWithEqualityFn(
prompt,
plugins: findCh.channel_plugins,
};
+
if (modelType === "openai") {
params.proxy = modelConfig.proxy;
} else if (modelType === "azure") {
@@ -519,18 +520,21 @@ export const useChannelStore = createWithEqualityFn(
getChannelName: (params) => {
return new Promise((resolve, reject) => {
const { decoder } = streamDecoder();
+ const OpenAIStore = useOpenAIStore.getState();
const newParams = params.newParams;
+ newParams.model = "gpt-3.5-turbo";
+ newParams.modelLabel = "gpt-3.5-turbo";
newParams.chat_list = newParams.chat_list.map((item: any) => ({
role: item.role,
content: item.content,
}));
- fetch(params.fetchUrl, {
+ fetch("/api/openai", {
method: "POST",
headers: {
"Content-Type": "application/json",
- Authorization: params.apiKey,
+ Authorization: OpenAIStore.openai.apiKey,
},
body: JSON.stringify(newParams),
}).then(async (response) => {
diff --git a/src/hooks/useLLM/index.tsx b/src/hooks/useLLM/index.tsx
index c959c74..986aaf2 100644
--- a/src/hooks/useLLM/index.tsx
+++ b/src/hooks/useLLM/index.tsx
@@ -1,4 +1,5 @@
import { createWithEqualityFn } from "zustand/traditional";
+import Image from "next/image";
import { shallow } from "zustand/shallow";
import Icon from "@/components/icon";
import type { LLMStore } from "./types";
@@ -8,6 +9,11 @@ export const PREMIUM_MODELS = [
"gpt-4-0613",
"gpt-4-32k",
"gpt-4-32k-0613",
+ "Claude Instant v1",
+ "Claude v2",
+ "PaLM 2 Bison",
+ "Llama v2 13B",
+ "Llama v2 70B",
];
export const useLLMStore = createWithEqualityFn(
@@ -18,14 +24,42 @@ export const useLLMStore = createWithEqualityFn(
ico: ,
ico_big: ,
models: [
- { label: "gpt-3.5-turbo", value: "gpt-3.5-turbo" },
- { label: "gpt-3.5-turbo-0613", value: "gpt-3.5-turbo-0613" },
- { label: "gpt-3.5-turbo-16k", value: "gpt-3.5-turbo-16k" },
- { label: "gpt-3.5-turbo-16k-0613", value: "gpt-3.5-turbo-16k-0613" },
- { label: "gpt-4", value: "gpt-4", premium: true },
- { label: "gpt-4-0613", value: "gpt-4-0613", premium: true },
- { label: "gpt-4-32k", value: "gpt-4-32k", premium: true },
- { label: "gpt-4-32k-0613", value: "gpt-4-32k-0613", premium: true },
+ {
+ label: "gpt-3.5-turbo",
+ value: "gpt-3.5-turbo",
+ },
+ {
+ label: "gpt-3.5-turbo-0613",
+ value: "gpt-3.5-turbo-0613",
+ },
+ {
+ label: "gpt-3.5-turbo-16k",
+ value: "gpt-3.5-turbo-16k",
+ },
+ {
+ label: "gpt-3.5-turbo-16k-0613",
+ value: "gpt-3.5-turbo-16k-0613",
+ },
+ {
+ label: "gpt-4",
+ value: "gpt-4",
+ premium: true,
+ },
+ {
+ label: "gpt-4-0613",
+ value: "gpt-4-0613",
+ premium: true,
+ },
+ {
+ label: "gpt-4-32k",
+ value: "gpt-4-32k",
+ premium: true,
+ },
+ {
+ label: "gpt-4-32k-0613",
+ value: "gpt-4-32k-0613",
+ premium: true,
+ },
],
},
azure: {
@@ -40,6 +74,48 @@ export const useLLMStore = createWithEqualityFn(
{ label: "gpt-4-32k", value: "gpt-4-32k", premium: true },
],
},
+ openRouter: {
+ label: "OpenRouter",
+ value: "openRouter",
+ ico: ,
+ ico_big: ,
+ models: [
+ {
+ label: "Claude Instant v1",
+ value: "anthropic/claude-instant-v1",
+ icon: (
+
+ ),
+ premium: true,
+ },
+ {
+ label: "Claude v2",
+ value: "anthropic/claude-2",
+ icon: (
+
+ ),
+ premium: true,
+ },
+ {
+ label: "PaLM 2 Bison",
+ value: "google/palm-2-chat-bison",
+ icon: ,
+ premium: true,
+ },
+ {
+ label: "Llama v2 13B",
+ value: "meta-llama/llama-2-13b-chat",
+ icon: 🦙,
+ premium: true,
+ },
+ {
+ label: "Llama v2 70B",
+ value: "meta-llama/llama-2-70b-chat",
+ icon: 🦙,
+ premium: true,
+ },
+ ],
+ },
updateAzure: (models) => {
localStorage.setItem("azureModels", JSON.stringify(models));
diff --git a/src/hooks/useLLM/types.ts b/src/hooks/useLLM/types.ts
index ea3f3da..3f25dd7 100644
--- a/src/hooks/useLLM/types.ts
+++ b/src/hooks/useLLM/types.ts
@@ -3,6 +3,7 @@ import React from "react";
export type Model = {
label: string;
value: string;
+ icon?: React.ReactNode;
premium?: boolean;
};
@@ -17,6 +18,7 @@ export type ModelConfig = {
export type LLMStore = {
openai: ModelConfig;
azure: ModelConfig;
+ openRouter: ModelConfig;
updateAzure: (models: Model[]) => void;
};
diff --git a/src/hooks/useModelCache.ts b/src/hooks/useModelCache.ts
index cad9aef..efbcef8 100644
--- a/src/hooks/useModelCache.ts
+++ b/src/hooks/useModelCache.ts
@@ -27,8 +27,8 @@ export const useModelCacheStore = create((set) => ({
set((state) => {
if (!state.model_type || !state.model_name) return {};
- const { openai, azure } = useLLMStore.getState();
- const LLMOptions = [openai, azure];
+ const { openai, azure, openRouter } = useLLMStore.getState();
+ const LLMOptions = [openai, azure, openRouter];
const findModelType = LLMOptions.find(
(item) => item.value === state.model_type
);
diff --git a/src/hooks/useOpenAI/index.ts b/src/hooks/useOpenAI/index.ts
index 5d68edc..1a67916 100755
--- a/src/hooks/useOpenAI/index.ts
+++ b/src/hooks/useOpenAI/index.ts
@@ -1,6 +1,6 @@
import { createWithEqualityFn } from "zustand/traditional";
import { shallow } from "zustand/shallow";
-import type { OpenAIStore, OpenAI, Azure, Env } from "./types";
+import type { OpenAIStore, OpenAI, Azure, OpenRouter, Env } from "./types";
const getStorage = (key: string) => {
const localStore = localStorage.getItem(key);
@@ -18,13 +18,18 @@ export const useOpenAIStore = createWithEqualityFn(
apiKey: "",
proxy: "",
temperature: 1,
- max_tokens: 2000,
+ max_tokens: 1000,
},
azure: {
apiKey: "",
resourceName: "",
temperature: 1,
- max_tokens: 2000,
+ max_tokens: 1000,
+ },
+ openRouter: {
+ apiKey: "",
+ temperature: 1,
+ max_tokens: 1000,
},
env: {
OPENAI_API_KEY: "",
@@ -41,6 +46,11 @@ export const useOpenAIStore = createWithEqualityFn(
set({ azure });
},
+ updateOpenRouter: (openRouter: OpenRouter) => {
+ localStorage.setItem("openRouterConfig", JSON.stringify(openRouter));
+ set({ openRouter });
+ },
+
updateEnv: (env: Env) => {
set({ env });
},
@@ -51,6 +61,7 @@ export const useOpenAIStore = createWithEqualityFn(
export const useOpenAIInit = () => {
const updateOpenAI = useOpenAIStore((state) => state.updateOpenAI);
const updateAzure = useOpenAIStore((state) => state.updateAzure);
+ const updateOpenRouter = useOpenAIStore((state) => state.updateOpenRouter);
const updateEnv = useOpenAIStore((state) => state.updateEnv);
const init = () => {
@@ -58,16 +69,22 @@ export const useOpenAIInit = () => {
apiKey: "",
proxy: "",
temperature: 1,
- max_tokens: 2000,
+ max_tokens: 1000,
};
const localAzureConfig = getStorage("azureConfig") || {
apiKey: "",
resourceName: "",
temperature: 1,
- max_tokens: 2000,
+ max_tokens: 1000,
+ };
+ const localOpenRouterConfig = getStorage("openRouterConfig") || {
+ apiKey: "",
+ temperature: 1,
+ max_tokens: 1000,
};
updateOpenAI(localOpenAIConfig);
updateAzure(localAzureConfig);
+ updateOpenRouter(localOpenRouterConfig);
updateEnv({
OPENAI_API_KEY: process.env.NEXT_PUBLIC_OPENAI_API_KEY || "",
AZURE_API_KEY: process.env.NEXT_PUBLIC_AZURE_OPENAI_API_KEY || "",
diff --git a/src/hooks/useOpenAI/types.ts b/src/hooks/useOpenAI/types.ts
index d6887bd..b69b7f6 100755
--- a/src/hooks/useOpenAI/types.ts
+++ b/src/hooks/useOpenAI/types.ts
@@ -12,6 +12,12 @@ export type Azure = {
max_tokens: number;
};
+export type OpenRouter = {
+ apiKey: string;
+ temperature: number;
+ max_tokens: number;
+};
+
export type Env = {
OPENAI_API_KEY: string;
AZURE_API_KEY: string;
@@ -20,8 +26,11 @@ export type Env = {
export type OpenAIStore = {
openai: OpenAI;
azure: Azure;
+ openRouter: OpenRouter;
env: Env;
+
updateOpenAI: (openai: OpenAI) => void;
updateAzure: (azure: Azure) => void;
+ updateOpenRouter: (openRouter: OpenRouter) => void;
updateEnv: (env: Env) => void;
};
diff --git a/src/lib/calcTokens/gpt-tokens.ts b/src/lib/calcTokens/gpt-tokens.ts
index 5ea2080..779ab0d 100644
--- a/src/lib/calcTokens/gpt-tokens.ts
+++ b/src/lib/calcTokens/gpt-tokens.ts
@@ -42,31 +42,21 @@ interface MessageItem {
}
export class GPTTokens {
- constructor(options: { model: supportModelType; messages: MessageItem[] }) {
- const { model, messages } = options;
+ constructor(options: {
+ model: supportModelType;
+ messages: MessageItem[];
+ promptTokenRatio: number;
+ completionTokenRatio: number;
+ }) {
+ const { model, messages, promptTokenRatio, completionTokenRatio } = options;
if (!GPTTokens.supportModels.includes(model))
throw new Error(`Model ${model} is not supported`);
- if (model === "gpt-3.5-turbo")
- this.warning(
- `${model} may update over time. Returning num tokens assuming gpt-3.5-turbo-0613`
- );
- if (model === "gpt-3.5-turbo-16k")
- this.warning(
- `${model} may update over time. Returning num tokens assuming gpt-3.5-turbo-16k-0613`
- );
- if (model === "gpt-4")
- this.warning(
- `${model} may update over time. Returning num tokens assuming gpt-4-0613`
- );
- if (model === "gpt-4-32k")
- this.warning(
- `${model} may update over time. Returning num tokens assuming gpt-4-32k-0613`
- );
-
this.model = model;
this.messages = messages;
+ this.promptTokenRatio = promptTokenRatio;
+ this.completionTokenRatio = completionTokenRatio;
}
public static readonly supportModels: supportModelType[] = [
@@ -85,6 +75,8 @@ export class GPTTokens {
public readonly model;
public readonly messages;
+ public readonly promptTokenRatio;
+ public readonly completionTokenRatio;
// https://openai.com/pricing/
// gpt-3.5-turbo 4K context
@@ -152,12 +144,12 @@ export class GPTTokens {
this.model
)
) {
- const promptUSD = new Decimal(this.promptUsedTokens).mul(
- this.gpt3_5_turboPromptTokenUnit
- );
- const completionUSD = new Decimal(this.completionUsedTokens).mul(
- this.gpt3_5_turboCompletionTokenUnit
- );
+ const promptUSD = new Decimal(this.promptUsedTokens)
+ .mul(this.gpt3_5_turboPromptTokenUnit)
+ .mul(this.promptTokenRatio);
+ const completionUSD = new Decimal(this.completionUsedTokens)
+ .mul(this.gpt3_5_turboCompletionTokenUnit)
+ .mul(this.completionTokenRatio);
price = promptUSD.add(completionUSD).toNumber();
}
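The new ratio parameters let non-OpenAI models be priced by scaling gpt-3.5-turbo's per-token rates. Usage sketch with the "Claude v2" ratios that `transformModel` assigns in the next file (the ratios look like per-million-token price quotients, roughly 11.02/1.5 for prompts and 32.68/2.0 for completions; that derivation is inferred, not stated in this diff):

```ts
const tokenInfo = new GPTTokens({
  model: "gpt-3.5-turbo", // count tokens as gpt-3.5-turbo, then scale the price
  messages: [
    { role: "user", content: "Hello" },
    { role: "assistant", content: "Hi there!" },
  ],
  promptTokenRatio: 7.347,
  completionTokenRatio: 16.34,
});
const { usedTokens, usedUSD } = tokenInfo;
```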
diff --git a/src/lib/calcTokens/index.ts b/src/lib/calcTokens/index.ts
index d1d8e42..356fa1d 100644
--- a/src/lib/calcTokens/index.ts
+++ b/src/lib/calcTokens/index.ts
@@ -1,8 +1,82 @@
import { GPTTokens } from "./gpt-tokens";
import type { supportModelType } from "./gpt-tokens";
-export const calcTokens = (messages: any[], model: supportModelType) => {
- const tokenInfo = new GPTTokens({ model, messages });
+const supportModels: supportModelType[] = [
+ "gpt-3.5-turbo-0301",
+ "gpt-3.5-turbo",
+ "gpt-3.5-turbo-0613",
+ "gpt-3.5-turbo-16k",
+ "gpt-3.5-turbo-16k-0613",
+ "gpt-4",
+ "gpt-4-0314",
+ "gpt-4-0613",
+ "gpt-4-32k",
+ "gpt-4-32k-0314",
+ "gpt-4-32k-0613",
+];
+
+const transformModel = (model: string) => {
+ if (supportModels.includes(model as supportModelType)) {
+ return {
+ model,
+ promptTokenRatio: 1,
+ completionTokenRatio: 1,
+ };
+ }
+
+ if (model === "Claude Instant v1") {
+ return {
+ model: "gpt-3.5-turbo",
+ promptTokenRatio: 1.087,
+ completionTokenRatio: 2.755,
+ };
+ }
+ if (model === "Claude v2") {
+ return {
+ model: "gpt-3.5-turbo",
+ promptTokenRatio: 7.347,
+ completionTokenRatio: 16.34,
+ };
+ }
+ if (model === "PaLM 2 Bison") {
+ return {
+ model: "gpt-3.5-turbo",
+ promptTokenRatio: 1.333,
+ completionTokenRatio: 1,
+ };
+ }
+ if (model === "Llama v2 13B") {
+ return {
+ model: "gpt-3.5-turbo",
+ promptTokenRatio: 2.667,
+ completionTokenRatio: 2,
+ };
+ }
+ if (model === "Llama v2 70B") {
+ return {
+ model: "gpt-3.5-turbo",
+ promptTokenRatio: 10.667,
+ completionTokenRatio: 8,
+ };
+ }
+
+ return {
+ model: "gpt-3.5-turbo",
+ promptTokenRatio: 1,
+ completionTokenRatio: 1,
+ };
+};
+
+export const calcTokens = (messages: any[], modelLabel: supportModelType) => {
+ const { model, promptTokenRatio, completionTokenRatio } =
+ transformModel(modelLabel);
+
+ const tokenInfo = new GPTTokens({
+ model: model as supportModelType,
+ messages,
+ promptTokenRatio,
+ completionTokenRatio,
+ });
const { usedTokens, usedUSD } = tokenInfo;
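Worked example of the mapping: for "Llama v2 70B" (ratios 10.667 and 8), a chat that tokenizes to 1,000 prompt and 200 completion tokens is billed as gpt-3.5-turbo tokens at scaled rates. Assuming the library's gpt-3.5-turbo units of $0.0015 per 1K prompt tokens and $0.002 per 1K completion tokens:

```ts
const promptUSD = 1000 * (0.0015 / 1000) * 10.667; // ≈ $0.0160
const completionUSD = 200 * (0.002 / 1000) * 8;    // ≈ $0.0032
const usedUSD = promptUSD + completionUSD;         // ≈ $0.0192
```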
diff --git a/src/lib/stream/index.ts b/src/lib/stream/index.ts
index ebc8049..326e851 100644
--- a/src/lib/stream/index.ts
+++ b/src/lib/stream/index.ts
@@ -23,10 +23,52 @@ interface StreamProps {
fetchFn?: any;
}
+interface ICalcTokens {
+ userId?: string;
+ headerApiKey?: string;
+ messages: any[];
+ content: string;
+ modelLabel: supportModelType;
+}
+
const sleep = (ms: number) => {
return new Promise((resolve) => setTimeout(resolve, ms));
};
+const calculateTokens = async ({
+ userId,
+ headerApiKey,
+ messages,
+ content,
+ modelLabel,
+}: ICalcTokens) => {
+ // Skip token accounting when the user supplies their own key
+ if (userId && !headerApiKey) {
+ const final = [...messages, { role: "assistant", content }];
+
+ const { usedTokens, usedUSD } = calcTokens(final, modelLabel);
+
+ const findUser = await prisma.user.findUnique({
+ where: { id: userId },
+ });
+ if (findUser) {
+ const costTokens = findUser.costTokens + usedTokens;
+ const costUSD = Number((findUser.costUSD + usedUSD).toFixed(5));
+ const availableTokens =
+ findUser.availableTokens - Math.ceil(usedUSD * BASE_PRICE);
+
+ await prisma.user.update({
+ where: { id: userId },
+ data: {
+ costTokens,
+ costUSD,
+ availableTokens,
+ },
+ });
+ }
+ }
+};
+
export const stream = async ({
readable,
writable,
@@ -104,30 +146,13 @@ export const stream = async ({
}
// If use own key, no need to calculate tokens
- if (userId && !headerApiKey) {
- const final = [...messages, { role: "assistant", content: tokenContext }];
-
- const { usedTokens, usedUSD } = calcTokens(final, modelLabel);
-
- const findUser = await prisma.user.findUnique({
- where: { id: userId },
- });
- if (findUser) {
- const costTokens = findUser.costTokens + usedTokens;
- const costUSD = Number((findUser.costUSD + usedUSD).toFixed(5));
- const availableTokens =
- findUser.availableTokens - Math.ceil(usedUSD * BASE_PRICE);
-
- await prisma.user.update({
- where: { id: userId },
- data: {
- costTokens,
- costUSD,
- availableTokens,
- },
- });
- }
- }
+ await calculateTokens({
+ userId,
+ headerApiKey,
+ messages,
+ content: tokenContext,
+ modelLabel,
+ });
if (!is_function_call) {
if (streamBuffer) await writer.write(encoder.encode(streamBuffer));
@@ -205,34 +230,13 @@ export const stream = async ({
}
// If use own key, no need to calculate tokens
- if (userId && !headerApiKey) {
- const final = [
- ...messages,
- { role: "assistant", content: function_call_resultContent },
- ];
-
- const { usedTokens, usedUSD } = calcTokens(final, modelLabel);
-
- const findUser = await prisma.user.findUnique({
- where: { id: userId },
- });
-
- if (findUser) {
- const costTokens = findUser.costTokens + usedTokens;
- const costUSD = Number((findUser.costUSD + usedUSD).toFixed(5));
- const availableTokens =
- findUser.availableTokens - Math.ceil(usedUSD * BASE_PRICE);
-
- await prisma.user.update({
- where: { id: userId },
- data: {
- costTokens,
- costUSD,
- availableTokens,
- },
- });
- }
- }
+ await calculateTokens({
+ userId,
+ headerApiKey,
+ messages: newMessages,
+ content: function_call_resultContent,
+ modelLabel,
+ });
if (function_call_streamBuffer) {
await writer.write(encoder.encode(function_call_streamBuffer));
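`calculateTokens` consolidates the two previously duplicated accounting blocks; the deduction rounds the scaled dollar cost up. With a `usedUSD` of $0.0192 and `BASE_PRICE` assumed to be 50,000 tokens per USD (its actual value is defined elsewhere in the repo), the request would cost:

```ts
const deducted = Math.ceil(0.0192 * 50_000); // 960 tokens off availableTokens
```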
diff --git a/src/locales/zh-CN.json b/src/locales/zh-CN.json
index 4386c21..38c3f45 100755
--- a/src/locales/zh-CN.json
+++ b/src/locales/zh-CN.json
@@ -203,7 +203,7 @@
"enter-to-search": "搜索",
"get-title": "这个聊天的简短而相关的标题是什么?您必须严格回答只有标题,不允许任何其他文本。",
"market": "集市",
- "model": "语言模型",
+ "model": "模型",
"no-data": "暂无数据",
"prompt-market": "Prompt 集市",
"recently-used": "最近使用",