Skip to content

Commit 7a848e8

Browse files
committed
fix: replace gpt-3.5, gpt-4o, gpt-4.1 with gpt-5
1 parent 9bfc23d commit 7a848e8

File tree

7 files changed

+17
-17
lines changed

7 files changed

+17
-17
lines changed

README.md

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -236,7 +236,7 @@ import { OpenAI } from "openai";
236236
const client = new OpenAI();
237237
238238
const stream = await client.responses.create({
239-
model: "gpt-4.1",
239+
model: "gpt-5",
240240
input: [
241241
{
242242
role: "user",
@@ -462,7 +462,7 @@ chats:
462462
agent_name: "url-checker"
463463
systemMessage: "Check for url in answer."
464464
completionParams:
465-
model: "gpt-4.1-nano"
465+
model: "gpt-5-nano"
466466
```
467467
468468
### How Evaluators Are Used

src/config.ts

Lines changed: 3 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -194,7 +194,7 @@ export function generateConfig(): ConfigType {
194194
whisperBaseUrl: "",
195195
},
196196
vision: {
197-
model: "gpt-4.1-mini",
197+
model: "gpt-5-mini",
198198
},
199199
logLevel: "info",
200200
langfuse: {
@@ -209,7 +209,7 @@ export function generateConfig(): ConfigType {
209209
name: "default",
210210
agent_name: "default",
211211
completionParams: {
212-
model: "gpt-4.1-mini",
212+
model: "gpt-5-mini",
213213
},
214214
systemMessage: "You are using functions to answer the questions. Current date: {date}",
215215
tools: ["javascript_interpreter", "brainstorm", "fetch"],
@@ -253,7 +253,7 @@ export function generateConfig(): ConfigType {
253253
username: "telegram_username_for_private_chats",
254254
prefix: "бот",
255255
completionParams: {
256-
model: "gpt-4.1-mini",
256+
model: "gpt-5-mini",
257257
temperature: 0.7,
258258
},
259259
local_model: "",

src/helpers/gpt/llm.ts

Lines changed: 3 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -290,7 +290,7 @@ export async function handleModelAnswer({
290290
: undefined;
291291
const model = modelExternal
292292
? modelExternal.model
293-
: gptContext.thread.completionParams?.model || "gpt-4.1-mini";
293+
: gptContext.thread.completionParams?.model || "gpt-5-mini";
294294
const apiParams = {
295295
messages: gptContext.messages,
296296
model,
@@ -517,7 +517,7 @@ export async function processToolResults({
517517
: undefined;
518518
const model = modelExternal
519519
? modelExternal.model
520-
: gptContext.thread.completionParams?.model || "gpt-4.1-mini";
520+
: gptContext.thread.completionParams?.model || "gpt-5-mini";
521521
const apiParams = {
522522
messages: gptContext.messages,
523523
model,
@@ -660,7 +660,7 @@ export async function requestGptAnswer(
660660
: undefined;
661661
const model = modelExternal
662662
? modelExternal.model
663-
: thread.completionParams?.model || "gpt-4.1-mini";
663+
: thread.completionParams?.model || "gpt-5-mini";
664664
const apiParams = {
665665
messages,
666666
model,

src/tools/brainstorm.ts

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -51,7 +51,7 @@ export class BrainstormClient extends AIFunctionsProvider {
5151
const { res } = await llmCall({
5252
apiParams: {
5353
messages,
54-
model: thread.completionParams?.model || "gpt-4o-mini",
54+
model: thread.completionParams?.model || "gpt-5-mini",
5555
temperature: thread.completionParams?.temperature,
5656
},
5757
msg,

testConfig.yml

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -14,14 +14,14 @@ mcpServers:
1414
stt:
1515
whisperBaseUrl: ""
1616
vision:
17-
model: gpt-4.1-mini
17+
model: gpt-5-mini
1818
local_models: []
1919
useChatsDir: false
2020
chatsDir: data/chats
2121
chats:
2222
- name: default
2323
completionParams:
24-
model: gpt-4.1-mini
24+
model: gpt-5-mini
2525
systemMessage: "You are using functions to answer the questions. Current date: {date}"
2626
tools:
2727
- javascript_interpreter

tests/helpers/getTokensCount.test.ts

Lines changed: 5 additions & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -10,7 +10,7 @@ jest.unstable_mockModule("js-tiktoken", () => ({
1010
let getTokensCount: typeof import("../../src/helpers/gpt/messages.ts").getTokensCount;
1111

1212
const baseConfig: { completionParams: { model: string } } = {
13-
completionParams: { model: "gpt-3.5-turbo" },
13+
completionParams: { model: "gpt-5-nano" },
1414
};
1515

1616
describe("getTokensCount", () => {
@@ -22,15 +22,15 @@ describe("getTokensCount", () => {
2222

2323
it("uses model name to select encoding", () => {
2424
getTokensCount(baseConfig, "a b c");
25-
expect(mockEncodingForModel).toHaveBeenCalledWith("gpt-3.5-turbo");
25+
expect(mockEncodingForModel).toHaveBeenCalledWith("gpt-5-nano");
2626
});
2727

28-
it("passes model name for 4o models", () => {
28+
it("passes model name for 5 models", () => {
2929
const cfg: { completionParams: { model: string } } = {
30-
completionParams: { model: "gpt-4o" },
30+
completionParams: { model: "gpt-5-mini" },
3131
};
3232
getTokensCount(cfg, "a b c");
33-
expect(mockEncodingForModel).toHaveBeenCalledWith("gpt-4o");
33+
expect(mockEncodingForModel).toHaveBeenCalledWith("gpt-5-mini");
3434
});
3535

3636
it("counts tokens using encoding", () => {

tests/helpers/retryLogic.test.ts

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -36,7 +36,7 @@ describe.skip("executeTools retry logic", () => {
3636
showToolMessages: true,
3737
},
3838
completionParams: {
39-
model: "gpt-4o-mini",
39+
model: "gpt-5-mini",
4040
},
4141
name: "default",
4242
systemMessage: "Test system message",

0 commit comments

Comments (0)