Svelte 5 Support #3107
This is a suggested implementation:

import type {
Attachment,
ChatRequest,
ChatRequestOptions,
JSONValue,
Message,
RequestOptions,
} from "ai";
import {
callChatApi,
processChatStream,
type UseChatOptions as SharedUseChatOptions,
} from "@ai-sdk/ui-utils";
import type { UseChatHelpers } from "ai/svelte";
import { get, writable } from "svelte/store";
export interface UseChatOptions extends SharedUseChatOptions {
/**
* Maximum number of sequential LLM calls (steps), e.g. when you use tool calls. Must be at least 1.
*
* A maximum number is required to prevent infinite loops in the case of misconfigured tools.
*
* By default, it's set to 1, which means that only a single LLM call is made.
*
* @default 1
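*
* @example
* // Allow up to three sequential LLM calls for tool-call round trips.
* useChat({ maxSteps: 3 });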
*/
maxSteps?: number;
}
/**
* Implementation of AI SDK `useChat` for Svelte 5.
* Use official `@ai-sdk/svelte` when [GitHub #3107](https://github.com/vercel/ai/issues/3107) is resolved.
*/
export function useChat({
maxSteps = 1,
initialMessages = [],
initialInput = "",
...options
}: UseChatOptions = {}) {
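// Reactive stores backing the returned UseChatHelpers surface.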
const messages = writable<Message[]>(initialMessages);
const streamData = writable<JSONValue[]>([]);
const input = writable(initialInput);
const error = writable<Error | undefined>();
const loading = writable(false);
let abortController: AbortController | null = null;
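// Drives one round trip through processChatStream and, when tool calls
// complete, recursively issues follow-up steps up to `maxSteps`.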
const sendChatRequest = async (chatRequest: ChatRequest) => {
const previousData = get(streamData);
const previousMessages = get(messages);
const previousMessagesCount = previousMessages.length;
try {
error.set(undefined);
loading.set(true);
abortController = new AbortController();
await processChatStream({
getStreamedResponse() {
return fetchStreamedResponse({
...options,
abortController,
chatRequest,
previousData,
previousMessages,
mutate(newest) {
messages.set(newest);
},
mutateData(newest) {
streamData.set(newest);
},
});
},
updateChatRequest(newChatRequest) {
chatRequest = newChatRequest;
},
getCurrentMessages() {
return get(messages);
},
});
abortController = null;
} catch (err) {
if (err instanceof Error) {
if (err.name === "AbortError") {
return null;
}
if (typeof options?.onError === "function") {
options.onError(err);
}
error.set(err);
}
} finally {
loading.set(false);
}
const newMessages = get(messages);
const lastMessage = newMessages.at(-1);
if (
// Check if the number of messages has increased.
newMessages.length > previousMessagesCount &&
// Check if there is a last message.
!!lastMessage &&
// Check if the number of steps is less than the maximum allowed.
maxSteps > 1 &&
// Check if the last message is from the assistant and has completed tool invocations.
isAssistantMessageWithCompletedToolCalls(lastMessage) &&
// Limit the number of steps to prevent infinite loops.
countTrailingAssistantMessages(newMessages) < maxSteps
) {
await sendChatRequest({ messages: newMessages });
}
};
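// Form submit handler: builds a user message from the input store and sends it.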
const handleSubmit: UseChatHelpers["handleSubmit"] = async (event, submitOptions = {}) => {
event?.preventDefault?.();
if (!get(input) && !submitOptions.allowEmptySubmit) {
return;
}
const requestOptions: RequestOptions = {
headers: options?.headers ?? submitOptions.headers ?? submitOptions.options?.headers,
body: options?.body ?? submitOptions.body ?? submitOptions.options?.body,
};
const attachments = await prepareAttachmentsForRequest(submitOptions.experimental_attachments);
const message: Message = {
id: crypto.randomUUID(),
role: "user",
content: get(input),
createdAt: new Date(),
// Extracted from vercel/ai
// https://github.com/vercel/ai/blob/3c68277f00e96c5570aa554a206e409bccf062cb/packages/react/src/use-chat.ts#L605-L608
experimental_attachments: attachments.length > 0 ? attachments : undefined,
};
const chatRequest: ChatRequest = {
messages: get(messages).concat(message),
data: get(streamData),
body: requestOptions.body,
headers: requestOptions.headers,
options: requestOptions,
};
sendChatRequest(chatRequest);
// Clear the input field.
input.set("");
};
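// Append an arbitrary message (e.g. programmatically) and trigger a request.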
const append: UseChatHelpers["append"] = (message, submitOptions: ChatRequestOptions = {}) => {
if (!message.id) {
message.id = crypto.randomUUID();
}
const requestOptions: RequestOptions = {
headers: options?.headers ?? submitOptions.headers ?? submitOptions.options?.headers,
body: options?.body ?? submitOptions.body ?? submitOptions.options?.body,
};
const chatRequest: ChatRequest = {
...submitOptions,
messages: get(messages).concat(message as Message),
data: get(streamData),
body: requestOptions.body,
headers: requestOptions.headers,
options: requestOptions,
};
return sendChatRequest(chatRequest);
};
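// Cancel the in-flight request, if any.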
const stop: UseChatHelpers["stop"] = () => {
if (abortController) {
abortController.abort();
abortController = null;
}
};
const setMessages: UseChatHelpers["setMessages"] = (newMessages) => {
if (typeof newMessages === "function") {
messages.update(newMessages);
return;
}
messages.set(newMessages);
};
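// Regenerate the last assistant response: if the final message is from the
// assistant, drop it and resend the conversation.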
const reload: UseChatHelpers["reload"] = async (submitOptions = {}) => {
const previousMessages = get(messages);
if (!previousMessages.length) {
return null;
}
const requestOptions: RequestOptions = {
headers: options?.headers ?? submitOptions.headers ?? submitOptions.options?.headers,
body: options?.body ?? submitOptions.body ?? submitOptions.options?.body,
};
const lastMessage = previousMessages.at(-1);
if (lastMessage?.role === "assistant") {
const chatRequest: ChatRequest = {
messages: previousMessages.slice(0, -1),
options: requestOptions,
headers: requestOptions.headers,
body: requestOptions.body,
data: get(streamData),
};
return sendChatRequest(chatRequest);
}
const chatRequest: ChatRequest = {
messages: previousMessages,
options: requestOptions,
headers: requestOptions.headers,
body: requestOptions.body,
data: get(streamData),
};
return sendChatRequest(chatRequest);
};
return {
error,
isLoading: loading,
messages,
data: streamData,
input,
handleSubmit,
append,
stop,
reload,
setMessages,
};
}
/**
* Checks if a given message is from an assistant and has completed tool invocations.
*
* A message is considered to have completed tool invocations if:
* - The message role is "assistant".
* - The message has a non-empty array of tool invocations.
* - Every tool invocation in the array contains a "result" property.
* @param message - The message object to check.
* @returns `true` if the message is from an assistant and all tool invocations are completed, otherwise `false`.
*/
function isAssistantMessageWithCompletedToolCalls(message: Message) {
return (
message.role === "assistant" &&
message.toolInvocations &&
message.toolInvocations.length > 0 &&
message.toolInvocations.every((toolInvocation) => "result" in toolInvocation)
);
}
/**
* Returns the number of trailing assistant messages in the array.
*/
function countTrailingAssistantMessages(messages: Message[]) {
let count = 0;
for (let i = messages.length - 1; i >= 0; i--) {
if (messages[i].role === "assistant") {
count++;
} else {
break;
}
}
return count;
}
interface FetchStreamedResponseOptions extends SharedUseChatOptions {
chatRequest: ChatRequest;
abortController: AbortController | null;
/**
* The current messages.
*/
previousMessages: Message[];
/**
* The current stream data.
*/
previousData: JSONValue[];
/**
* Mutate the messages.
* @param messages The new messages.
*/
mutate(messages: Message[]): void;
/**
* Mutate the stream data.
* @param data The new stream data.
*/
mutateData(data: JSONValue[]): void;
}
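// Calls the chat API endpoint and streams updates into the local stores,
// applying an optimistic update first.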
function fetchStreamedResponse({
previousData,
previousMessages,
chatRequest,
mutate,
mutateData,
...options
}: FetchStreamedResponseOptions) {
// Optimistic update to avoid waiting for the response.
mutate(chatRequest.messages);
const body = {
...chatRequest.body,
messages: chatRequest.messages,
data: chatRequest.data,
functions: chatRequest.functions,
function_call: chatRequest.function_call,
tools: chatRequest.tools,
tool_choice: chatRequest.tool_choice,
};
return callChatApi({
...options,
body,
api: options.api ?? "/api/chat",
streamProtocol: options.streamProtocol,
credentials: options.credentials,
headers: options.headers,
generateId: options.generateId ?? (() => crypto.randomUUID()),
fetch: options.fetch ?? fetch,
onResponse: options.onResponse,
onFinish: options.onFinish,
onToolCall: options.onToolCall,
abortController: () => options.abortController,
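// Roll back the optimistic update on failure unless the caller opted to
// keep the last message.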
restoreMessagesOnFailure() {
if (!options.keepLastMessageOnError) {
mutate(previousMessages);
}
},
onUpdate(messages, data) {
mutate([...chatRequest.messages, ...messages]);
mutateData([...previousData, ...(data ?? [])]);
},
});
}
/**
* Prepares attachments for a chat request.
* Extracted from [vercel/ai](https://github.com/vercel/ai).
* @see https://github.com/vercel/ai/blob/3c68277f00e96c5570aa554a206e409bccf062cb/packages/react/src/use-chat.ts#L709-L744
*/
async function prepareAttachmentsForRequest(
attachmentsFromOptions: FileList | Array<Attachment> | undefined,
) {
if (attachmentsFromOptions == null) {
return [];
}
if (attachmentsFromOptions instanceof FileList) {
return Promise.all(
Array.from(attachmentsFromOptions).map(async (attachment) => {
const { name, type } = attachment;
const dataUrl = await new Promise<string>((resolve, reject) => {
const reader = new FileReader();
reader.onload = (readerEvent) => {
resolve(readerEvent.target?.result as string);
};
reader.onerror = (error) => reject(error);
reader.readAsDataURL(attachment);
});
return {
name,
contentType: type,
url: dataUrl,
};
}),
);
}
if (Array.isArray(attachmentsFromOptions)) {
return attachmentsFromOptions;
}
throw new Error("Invalid attachments type");
} The usage will be: <script lang="ts">
import { Button, Input } from "$lib/components/atoms.ts";
import { useChat } from "$lib/hooks.ts";
const { messages, input, error, isLoading, handleSubmit } = useChat();
</script>
<div class="flex flex-col gap-2">
{#each $messages as message}
<div>
<p>{message.role}: {message.content}</p>
</div>
{/each}
</div>
{#if $error instanceof Error}
<p>{$error.message}</p>
{/if}
<form onsubmit={handleSubmit}>
<Input bind:value={$input} />
<Button loading={$isLoading} type="submit">Send</Button>
</form>
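Besides the form flow, messages can also be sent programmatically through append; a minimal sketch reusing the hook above (the suggest helper and its prompt are hypothetical):

<script lang="ts">
  import { useChat } from "$lib/hooks.ts";
  const { append } = useChat();
  // Hypothetical helper: send a canned prompt without going through the form.
  function suggest() {
    append({ role: "user", content: "Summarize the conversation so far." });
  }
</script>
<button onclick={suggest}>Suggest</button>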
Sorry, Andrey. Did not mean to ping you. :)
@adwher thanks for sharing your snippet. For me it did not add the initial messages; would you consider adding the following change?

export function useChat({ maxSteps = 1, ...options }: UseChatOptions = {}) {
- const messages = writable<Message[]>([]);
+ const messages = writable<Message[]>(options.initialMessages ?? []);
const streamData = writable<JSONValue[]>([]);
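With that change, seeding the conversation would look something like this (a minimal sketch; the system message content is illustrative):

const { messages } = useChat({
  initialMessages: [
    { id: crypto.randomUUID(), role: "system", content: "You are a helpful assistant." },
  ],
});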
Yes, I had already added that in my local code but forgot to add it to the snippet. Thank you @noxan for the reminder. 🙏
Currently unable to install.
@lgrammel Please bump the version for things that just work.
Description
Currently, the @ai-sdk/svelte package only supports Svelte 3 and Svelte 4, but it could be made fully usable by replacing the sswr dependency with something like @tanstack/svelte-query or a built-in fetching strategy.

Code example
Svelte 5: Runes mode
Svelte 5: Compatibility mode
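A runes-based variant could keep chat state in a reactive class instead of stores; a minimal sketch in a .svelte.ts module (the ChatState container is assumed, not part of the SDK):

// Illustrative only: Svelte 5 runes state for a hypothetical chat container.
import type { Message } from "ai";

export class ChatState {
  // Reactive fields via the $state rune; components read them directly.
  messages = $state<Message[]>([]);
  input = $state("");
  isLoading = $state(false);
  error = $state<Error | undefined>(undefined);
}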
Additional context
These are the places where the library uses the Svelte 4 restrictive code:
https://github.com/ConsoleTVs/sswr/blob/836f3eab4faa4e22bd936f39ea66a42d56707175/src/sswr.ts#L29-L50