Skip to content

Commit cccbbe0

Browse files
committed
Initial commit from Create Next App
0 parents  commit cccbbe0

File tree

40 files changed

+7479
-0
lines changed

40 files changed

+7479
-0
lines changed

.env.local.example

+1
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
OPENAI_API_KEY=xxxxxxx

.gitignore

+35
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2+
3+
# dependencies
4+
/node_modules
5+
/.pnp
6+
.pnp.js
7+
8+
# testing
9+
/coverage
10+
11+
# next.js
12+
/.next/
13+
/out/
14+
15+
# production
16+
/build
17+
18+
# misc
19+
.DS_Store
20+
*.pem
21+
22+
# debug
23+
npm-debug.log*
24+
yarn-debug.log*
25+
yarn-error.log*
26+
27+
# local env files
28+
.env*.local
29+
30+
# vercel
31+
.vercel
32+
33+
# typescript
34+
*.tsbuildinfo
35+
next-env.d.ts

README.md

+42
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
# Vercel AI SDK, Next.js, and OpenAI Chat Example
2+
3+
This example shows how to use the [Vercel AI SDK](https://sdk.vercel.ai/docs) with [Next.js](https://nextjs.org/) and [OpenAI](https://openai.com) to create a ChatGPT-like AI-powered streaming chat bot.
4+
5+
## Deploy your own
6+
7+
Deploy the example using [Vercel](https://vercel.com?utm_source=github&utm_medium=readme&utm_campaign=ai-sdk-example):
8+
9+
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fvercel%2Fai%2Ftree%2Fmain%2Fexamples%2Fnext-openai&env=OPENAI_API_KEY&envDescription=OpenAI%20API%20Key&envLink=https%3A%2F%2Fplatform.openai.com%2Faccount%2Fapi-keys&project-name=vercel-ai-chat-openai&repository-name=vercel-ai-chat-openai)
10+
11+
## How to use
12+
13+
Execute [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app) with [npm](https://docs.npmjs.com/cli/init), [Yarn](https://yarnpkg.com/lang/en/docs/cli/create/), or [pnpm](https://pnpm.io) to bootstrap the example:
14+
15+
```bash
16+
npx create-next-app --example https://github.com/vercel/ai/tree/main/examples/next-openai next-openai-app
17+
```
18+
19+
```bash
20+
yarn create next-app --example https://github.com/vercel/ai/tree/main/examples/next-openai next-openai-app
21+
```
22+
23+
```bash
24+
pnpm create next-app --example https://github.com/vercel/ai/tree/main/examples/next-openai next-openai-app
25+
```
26+
27+
To run the example locally you need to:
28+
29+
1. Sign up at [OpenAI's Developer Platform](https://platform.openai.com/signup).
30+
2. Go to [OpenAI's dashboard](https://platform.openai.com/account/api-keys) and create an API key.
31+
3. Set the required OpenAI environment variable to the API key value, as shown in [the example env file](./.env.local.example), but in a new file called `.env.local`
32+
4. `pnpm install` to install the required dependencies.
33+
5. `pnpm dev` to launch the development server.
34+
35+
## Learn More
36+
37+
To learn more about OpenAI, Next.js, and the Vercel AI SDK take a look at the following resources:
38+
39+
- [Vercel AI SDK docs](https://sdk.vercel.ai/docs)
40+
- [Vercel AI Playground](https://play.vercel.ai)
41+
- [OpenAI Documentation](https://platform.openai.com/docs) - learn about OpenAI features and API.
42+
- [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API.

app/api/assistant/assistant-setup.md

+63
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,63 @@
1+
# Home Automation Assistant Example
2+
3+
## Setup
4+
5+
### Create OpenAI Assistant
6+
7+
[OpenAI Assistant Website](https://platform.openai.com/assistants)
8+
9+
Create a new assistant. Enable Code interpreter. Add the following functions and instructions to the assistant.
10+
11+
Then add the assistant id to the `.env.local` file as `ASSISTANT_ID=your-assistant-id`.
12+
13+
### Instructions
14+
15+
```
16+
You are an assistant with access to a home automation system. You can get and set the temperature in the bedroom, home office, living room, kitchen and bathroom.
17+
18+
The system uses temperature in Celsius. If the user requests Fahrenheit, you should convert the temperature to Fahrenheit.
19+
```
20+
21+
### getRoomTemperature function
22+
23+
```json
24+
{
25+
"name": "getRoomTemperature",
26+
"description": "Get the temperature in a room",
27+
"parameters": {
28+
"type": "object",
29+
"properties": {
30+
"room": {
31+
"type": "string",
32+
"enum": ["bedroom", "home office", "living room", "kitchen", "bathroom"]
33+
}
34+
},
35+
"required": ["room"]
36+
}
37+
}
38+
```
39+
40+
### setRoomTemperature function
41+
42+
```json
43+
{
44+
"name": "setRoomTemperature",
45+
"description": "Set the temperature in a room",
46+
"parameters": {
47+
"type": "object",
48+
"properties": {
49+
"room": {
50+
"type": "string",
51+
"enum": ["bedroom", "home office", "living room", "kitchen", "bathroom"]
52+
},
53+
"temperature": { "type": "number" }
54+
},
55+
"required": ["room", "temperature"]
56+
}
57+
}
58+
```
59+
60+
## Run
61+
62+
1. Run `pnpm run dev` in `examples/next-openai`
63+
2. Go to http://localhost:3000/assistant

app/api/assistant/route.ts

+126
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,126 @@
1+
import { AssistantResponse } from 'ai';
2+
import OpenAI from 'openai';
3+
// Create an OpenAI API client (that's edge friendly!)
// Falls back to '' so module load does not throw when the key is missing;
// requests will then fail with an authentication error instead.
const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY || '',
});

// Allow streaming responses up to 30 seconds
export const maxDuration = 30;

// In-memory demo home-automation state, keyed by room name (Celsius).
// NOTE(review): module-level mutable state — resets on cold start and is
// shared across requests; fine for an example, not for production.
const homeTemperatures = {
  bedroom: 20,
  'home office': 21,
  'living room': 21,
  kitchen: 22,
  bathroom: 23,
};
20+
export async function POST(req: Request) {
21+
// Parse the request body
22+
const input: {
23+
threadId: string | null;
24+
message: string;
25+
} = await req.json();
26+
27+
// Create a thread if needed
28+
const threadId = input.threadId ?? (await openai.beta.threads.create({})).id;
29+
30+
// Add a message to the thread
31+
const createdMessage = await openai.beta.threads.messages.create(
32+
threadId,
33+
{
34+
role: 'user',
35+
content: input.message,
36+
},
37+
{ signal: req.signal },
38+
);
39+
40+
return AssistantResponse(
41+
{ threadId, messageId: createdMessage.id },
42+
async ({ forwardStream, sendDataMessage }) => {
43+
// Run the assistant on the thread
44+
const runStream = openai.beta.threads.runs.stream(
45+
threadId,
46+
{
47+
assistant_id:
48+
process.env.ASSISTANT_ID ??
49+
(() => {
50+
throw new Error('ASSISTANT_ID is not set');
51+
})(),
52+
},
53+
{ signal: req.signal },
54+
);
55+
56+
// forward run status would stream message deltas
57+
let runResult = await forwardStream(runStream);
58+
59+
// status can be: queued, in_progress, requires_action, cancelling, cancelled, failed, completed, or expired
60+
while (
61+
runResult?.status === 'requires_action' &&
62+
runResult.required_action?.type === 'submit_tool_outputs'
63+
) {
64+
const tool_outputs =
65+
runResult.required_action.submit_tool_outputs.tool_calls.map(
66+
(toolCall: any) => {
67+
const parameters = JSON.parse(toolCall.function.arguments);
68+
69+
switch (toolCall.function.name) {
70+
case 'getRoomTemperature': {
71+
const temperature =
72+
homeTemperatures[
73+
parameters.room as keyof typeof homeTemperatures
74+
];
75+
76+
return {
77+
tool_call_id: toolCall.id,
78+
output: temperature.toString(),
79+
};
80+
}
81+
82+
case 'setRoomTemperature': {
83+
const oldTemperature =
84+
homeTemperatures[
85+
parameters.room as keyof typeof homeTemperatures
86+
];
87+
88+
homeTemperatures[
89+
parameters.room as keyof typeof homeTemperatures
90+
] = parameters.temperature;
91+
92+
sendDataMessage({
93+
role: 'data',
94+
data: {
95+
oldTemperature,
96+
newTemperature: parameters.temperature,
97+
description: `Temperature in ${parameters.room} changed from ${oldTemperature} to ${parameters.temperature}`,
98+
},
99+
});
100+
101+
return {
102+
tool_call_id: toolCall.id,
103+
output: `temperature set successfully`,
104+
};
105+
}
106+
107+
default:
108+
throw new Error(
109+
`Unknown tool call function: ${toolCall.function.name}`,
110+
);
111+
}
112+
},
113+
);
114+
115+
runResult = await forwardStream(
116+
openai.beta.threads.runs.submitToolOutputsStream(
117+
threadId,
118+
runResult.id,
119+
{ tool_outputs },
120+
{ signal: req.signal },
121+
),
122+
);
123+
}
124+
},
125+
);
126+
}

app/api/chat/route.ts

+23
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
import { openai } from '@ai-sdk/openai';
2+
import { streamText } from 'ai';
3+
4+
// Allow streaming responses up to 30 seconds
5+
export const maxDuration = 30;
6+
7+
export async function POST(req: Request) {
8+
// Extract the `messages` from the body of the request
9+
const { messages } = await req.json();
10+
11+
// Call the language model
12+
const result = await streamText({
13+
model: openai('gpt-4-turbo'),
14+
messages,
15+
async onFinish({ text, toolCalls, toolResults, usage, finishReason }) {
16+
// implement your own logic here, e.g. for storing messages
17+
// or recording token usage
18+
},
19+
});
20+
21+
// Respond with the stream
22+
return result.toAIStreamResponse();
23+
}

app/api/completion/route.ts

+32
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
import { openai } from '@ai-sdk/openai';
2+
import { StreamData, StreamingTextResponse, streamText } from 'ai';
3+
4+
// Allow streaming responses up to 30 seconds
5+
export const maxDuration = 30;
6+
7+
export async function POST(req: Request) {
8+
// Extract the `prompt` from the body of the request
9+
const { prompt } = await req.json();
10+
11+
const result = await streamText({
12+
model: openai('gpt-3.5-turbo-instruct'),
13+
maxTokens: 2000,
14+
prompt,
15+
});
16+
17+
// optional: use stream data
18+
const data = new StreamData();
19+
20+
data.append('call started');
21+
22+
// Convert the response to an AI data stream
23+
const stream = result.toAIStream({
24+
onFinal(completion) {
25+
data.append('call completed');
26+
data.close();
27+
},
28+
});
29+
30+
// Respond with the stream
31+
return new StreamingTextResponse(stream, {}, data);
32+
}

app/api/use-chat-streamdata/route.ts

+30
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
import { openai } from '@ai-sdk/openai';
2+
import { StreamData, StreamingTextResponse, streamText } from 'ai';
3+
4+
// Allow streaming responses up to 30 seconds
5+
export const maxDuration = 30;
6+
7+
export async function POST(req: Request) {
8+
const { messages } = await req.json();
9+
10+
const result = await streamText({
11+
model: openai('gpt-4-turbo'),
12+
messages,
13+
});
14+
15+
// optional: use stream data
16+
const data = new StreamData();
17+
18+
data.append('initialized call');
19+
20+
return new StreamingTextResponse(
21+
result.toAIStream({
22+
onFinal() {
23+
data.append('call completed');
24+
data.close();
25+
},
26+
}),
27+
{},
28+
data,
29+
);
30+
}

0 commit comments

Comments
 (0)