Commit 6c13768

Merge pull request #1231 from refly-ai/main
Release 20250812
2 parents: 5da5cea + 3671084

63 files changed: +2613 −4272 lines. This is a large commit, so some file diffs are hidden by default and not reproduced below.

apps/api/Dockerfile

Lines changed: 3 additions & 0 deletions

@@ -9,6 +9,9 @@ RUN npm install -g pnpm
 ENV npm_config_gyp_ignore=true
 ENV CYPRESS_INSTALL_BINARY=0
 
+# Set node options to increase memory limit
+ENV NODE_OPTIONS='--max_old_space_size=8192'
+
 # Copy all necessary files in one layer
 COPY pnpm-workspace.yaml pnpm-lock.yaml package.json turbo.json ./
 COPY apps/api/package.json ./apps/api/
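
To sanity-check that the larger heap limit is actually applied inside the built image, a quick Node snippet can be run in the container. This is a hedged sketch, not part of the commit; `heap_size_limit` is reported in bytes and usually lands near, not exactly at, the configured 8192 MB.

import * as v8 from 'node:v8';

// With NODE_OPTIONS='--max_old_space_size=8192', the reported heap cap should be roughly 8192 MB.
const limitMb = Math.round(v8.getHeapStatistics().heap_size_limit / 1024 / 1024);
console.log(`heap_size_limit ≈ ${limitMb} MB`);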

apps/api/package.json

Lines changed: 6 additions & 6 deletions

@@ -39,9 +39,9 @@
     "@hocuspocus/extension-redis": "~2.15.0",
     "@hocuspocus/server": "~2.15.0",
     "@lancedb/lancedb": "^0.19.1",
-    "@langchain/community": "0.3.29",
-    "@langchain/core": "0.3.29",
-    "@langchain/openai": "0.5.6",
+    "@langchain/community": "0.3.50",
+    "@langchain/core": "0.3.68",
+    "@langchain/openai": "0.6.7",
     "@nest-lab/throttler-storage-redis": "^1.0.0",
     "@nestjs/bullmq": "~10.2.3",
     "@nestjs/common": "~10.3.9",
@@ -89,7 +89,7 @@
     "json-parse-even-better-errors": "^4.0.0",
     "jsonrepair": "^3.12.0",
     "jsonwebtoken": "~9.0.2",
-    "langchain": "0.3.15",
+    "langchain": "0.3.30",
     "lodash": "^4.17.21",
     "lru-cache": "^10.2.0",
     "mime": "^3.0.0",
@@ -120,8 +120,8 @@
     "ws": "~8.17.0",
     "y-protocols": "^1.0.6",
     "yjs": "^13.6.8",
-    "zod": "3.23.8",
-    "zod-to-json-schema": "^3.24.3"
+    "zod": "^3.25.76",
+    "zod-to-json-schema": "3.24.6"
   },
   "devDependencies": {
     "@golevelup/ts-jest": "^0.6.2",

apps/api/src/modules/skill/skill-invoker.service.ts

Lines changed: 2 additions & 2 deletions

@@ -62,6 +62,7 @@ import { SkillEngineService } from '../skill/skill-engine.service';
 import { CanvasService } from '../canvas/canvas.service';
 import { CanvasSyncService } from '../canvas/canvas-sync.service';
 import { ActionService } from '../action/action.service';
+import { extractChunkContent } from '../../utils/llm';
 
 @Injectable()
 export class SkillInvokerService {
@@ -783,8 +784,7 @@ ${event.data?.input ? JSON.stringify(event.data?.input?.input) : ''}
         break;
       }
       case 'on_chat_model_stream': {
-        const content = chunk.content.toString();
-        const reasoningContent = chunk?.additional_kwargs?.reasoning_content?.toString() || '';
+        const { content, reasoningContent } = extractChunkContent(chunk);
 
         if ((content || reasoningContent) && !runMeta?.suppressOutput) {
           if (runMeta?.artifact) {

apps/api/src/utils/llm.spec.ts

Lines changed: 245 additions & 0 deletions (new file)

@@ -0,0 +1,245 @@
import { BaseMessageChunk } from '@langchain/core/messages';
import { extractChunkContent } from './llm';

// Create a minimal mock type that only includes the properties we need for testing
type MockChunk = {
  content:
    | string
    | Array<{
        type: string;
        text?: string;
        reasoningText?: { text?: string };
        [key: string]: any; // Allow additional properties for testing
      }>;
  additional_kwargs?: {
    reasoning_content?: any;
  };
};

describe('extractChunkContent', () => {
  describe('when chunk.content is a string', () => {
    it('should extract string content and reasoning content from additional_kwargs', () => {
      const mockChunk: MockChunk = {
        content: 'Hello, world!',
        additional_kwargs: {
          reasoning_content: 'This is reasoning content',
        },
      };

      const result = extractChunkContent(mockChunk as BaseMessageChunk);

      expect(result).toEqual({
        content: 'Hello, world!',
        reasoningContent: 'This is reasoning content',
      });
    });

    it('should handle undefined reasoning content', () => {
      const mockChunk: MockChunk = {
        content: 'Hello, world!',
        additional_kwargs: {},
      };

      const result = extractChunkContent(mockChunk as BaseMessageChunk);

      expect(result).toEqual({
        content: 'Hello, world!',
        reasoningContent: undefined,
      });
    });

    it('should handle missing additional_kwargs', () => {
      const mockChunk: MockChunk = {
        content: 'Hello, world!',
      };

      const result = extractChunkContent(mockChunk as BaseMessageChunk);

      expect(result).toEqual({
        content: 'Hello, world!',
        reasoningContent: undefined,
      });
    });

    it('should convert non-string reasoning content to string', () => {
      const mockChunk: MockChunk = {
        content: 'Hello, world!',
        additional_kwargs: {
          reasoning_content: 123,
        },
      };

      const result = extractChunkContent(mockChunk as BaseMessageChunk);

      expect(result).toEqual({
        content: 'Hello, world!',
        reasoningContent: '123',
      });
    });
  });

  describe('when chunk.content is an array', () => {
    it('should extract text content from array items', () => {
      const mockChunk: MockChunk = {
        content: [
          { type: 'text', text: 'Hello, ' },
          { type: 'text', text: 'world!' },
        ],
      };

      const result = extractChunkContent(mockChunk as BaseMessageChunk);

      expect(result).toEqual({
        content: 'Hello, world!',
        reasoningContent: '',
      });
    });

    it('should extract reasoning content from array items', () => {
      const mockChunk: MockChunk = {
        content: [
          { type: 'reasoning_content', reasoningText: { text: 'This is reasoning' } },
          { type: 'reasoning_content', reasoningText: { text: ' content' } },
        ],
      };

      const result = extractChunkContent(mockChunk as BaseMessageChunk);

      expect(result).toEqual({
        content: '',
        reasoningContent: 'This is reasoning content',
      });
    });

    it('should handle mixed content types', () => {
      const mockChunk: MockChunk = {
        content: [
          { type: 'text', text: 'Hello, ' },
          { type: 'reasoning_content', reasoningText: { text: 'This is reasoning' } },
          { type: 'text', text: 'world!' },
          { type: 'reasoning_content', reasoningText: { text: ' content' } },
        ],
      };

      const result = extractChunkContent(mockChunk as BaseMessageChunk);

      expect(result).toEqual({
        content: 'Hello, world!',
        reasoningContent: 'This is reasoning content',
      });
    });

    it('should handle undefined reasoningText', () => {
      const mockChunk: MockChunk = {
        content: [
          { type: 'reasoning_content', reasoningText: undefined },
          { type: 'reasoning_content', reasoningText: { text: ' content' } },
        ],
      };

      const result = extractChunkContent(mockChunk as BaseMessageChunk);

      expect(result).toEqual({
        content: '',
        reasoningContent: ' content',
      });
    });

    it('should handle undefined reasoningText.text', () => {
      const mockChunk: MockChunk = {
        content: [
          { type: 'reasoning_content', reasoningText: { text: undefined } },
          { type: 'reasoning_content', reasoningText: { text: ' content' } },
        ],
      };

      const result = extractChunkContent(mockChunk as BaseMessageChunk);

      expect(result).toEqual({
        content: '',
        reasoningContent: ' content',
      });
    });

    it('should handle unknown item types', () => {
      const mockChunk: MockChunk = {
        content: [
          { type: 'text', text: 'Hello' },
          { type: 'unknown_type', someProperty: 'value' },
          { type: 'reasoning_content', reasoningText: { text: 'reasoning' } },
        ],
      };

      const result = extractChunkContent(mockChunk as BaseMessageChunk);

      expect(result).toEqual({
        content: 'Hello',
        reasoningContent: 'reasoning',
      });
    });

    it('should handle empty array', () => {
      const mockChunk: MockChunk = {
        content: [],
      };

      const result = extractChunkContent(mockChunk as BaseMessageChunk);

      expect(result).toEqual({
        content: '',
        reasoningContent: '',
      });
    });
  });

  describe('edge cases', () => {
    it('should handle null reasoning content', () => {
      const mockChunk: MockChunk = {
        content: 'Hello, world!',
        additional_kwargs: {
          reasoning_content: null,
        },
      };

      const result = extractChunkContent(mockChunk as BaseMessageChunk);

      expect(result).toEqual({
        content: 'Hello, world!',
        reasoningContent: undefined,
      });
    });

    it('should handle empty string reasoning content', () => {
      const mockChunk: MockChunk = {
        content: 'Hello, world!',
        additional_kwargs: {
          reasoning_content: '',
        },
      };

      const result = extractChunkContent(mockChunk as BaseMessageChunk);

      expect(result).toEqual({
        content: 'Hello, world!',
        reasoningContent: '',
      });
    });

    it('should handle empty text in array items', () => {
      const mockChunk: MockChunk = {
        content: [
          { type: 'text', text: '' },
          { type: 'text', text: 'Hello' },
          { type: 'text', text: '' },
        ],
      };

      const result = extractChunkContent(mockChunk as BaseMessageChunk);

      expect(result).toEqual({
        content: 'Hello',
        reasoningContent: '',
      });
    });
  });
});

apps/api/src/utils/llm.ts

Lines changed: 37 additions & 0 deletions (new file)

@@ -0,0 +1,37 @@
import { BaseMessageChunk } from '@langchain/core/messages';

interface ChunkContent {
  content: string;
  reasoningContent?: string;
}

/**
 * Extract the content and reasoning content from a Langchain message chunk.
 * @param chunk - The chunk to extract the content and reasoning content from
 * @returns The content and reasoning content
 */
export const extractChunkContent = (chunk: BaseMessageChunk): ChunkContent => {
  if (typeof chunk.content === 'string') {
    return {
      content: chunk.content,
      reasoningContent: chunk.additional_kwargs?.reasoning_content?.toString(),
    };
  }

  // Then content is an array of complex objects
  let content = '';
  let reasoningContent = '';

  for (const item of chunk.content) {
    if (item.type === 'text') {
      content += item.text;
    } else if (item.type === 'reasoning_content') {
      reasoningContent += item.reasoningText?.text ?? '';
    }
  }

  return {
    content,
    reasoningContent,
  };
};
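
For reference, a minimal usage sketch of the new helper, mirroring the spec above. The chunk literals are hand-built mocks cast to BaseMessageChunk (as in the spec), not real model output.

import { BaseMessageChunk } from '@langchain/core/messages';
import { extractChunkContent } from './llm';

// String content, with provider-specific reasoning carried in additional_kwargs
const stringChunk = {
  content: 'Hello, world!',
  additional_kwargs: { reasoning_content: 'model reasoning' },
} as unknown as BaseMessageChunk;
console.log(extractChunkContent(stringChunk));
// => { content: 'Hello, world!', reasoningContent: 'model reasoning' }

// Structured content: 'text' and 'reasoning_content' parts are accumulated separately
const arrayChunk = {
  content: [
    { type: 'text', text: 'Hello, ' },
    { type: 'reasoning_content', reasoningText: { text: 'model reasoning' } },
    { type: 'text', text: 'world!' },
  ],
} as unknown as BaseMessageChunk;
console.log(extractChunkContent(arrayChunk));
// => { content: 'Hello, world!', reasoningContent: 'model reasoning' }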

apps/api/src/utils/result.ts

Lines changed: 4 additions & 1 deletion

@@ -112,7 +112,10 @@ export class ResultAggregator {
     const step = this.getOrInitData(meta.step?.name);
 
     step.content += content;
-    step.reasoningContent += reasoningContent;
+
+    if (reasoningContent) {
+      step.reasoningContent += reasoningContent;
+    }
 
     this.data[step.name] = step;
   }
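
The guard matters because extractChunkContent can now return reasoningContent as undefined (a string chunk with no reasoning), and unconditionally appending undefined to a string coerces it to the literal text 'undefined'. A small illustrative sketch of the behavior being avoided, not part of the commit:

// Sketch: why the truthiness check is needed before appending reasoning content.
const reasoningContent: any = undefined; // e.g. a string chunk with no reasoning attached

let unguarded = '';
unguarded += reasoningContent; // -> 'undefined' (undefined is coerced to a string)

let guarded = '';
if (reasoningContent) {
  guarded += reasoningContent; // skipped entirely for undefined/empty values
}
console.log({ unguarded, guarded }); // { unguarded: 'undefined', guarded: '' }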

docs/cloud/credit-usage-guide.md

Lines changed: 13 additions & 9 deletions

@@ -42,16 +42,19 @@ No manual action required, can switch to new packages anytime for more flexible
 
 **Early bird users can use these models unlimitedly:**
 1. Claude Sonnet 4
-2. Claude 3.7 Sonnet (thinking)
+2. Claude 4 Sonnet (thinking)
 3. Kimi K2
 4. GPT-4o
 5. GPT-4.1
-6. Gemini 2.5 Pro
-7. Gemini 2.5 Flash
-8. DeepSeek V3
-9. DeepSeek R1
-10. Grok 4
-11. Qwen3 Coder
+6. GPT-5
+7. GPT-o3
+8. GPT-OSS-120B
+9. Gemini 2.5 Pro
+10. Gemini 2.5 Flash
+11. DeepSeek V3
+12. DeepSeek R1
+13. Grok 4
+14. Qwen3 Coder
 
 ## 📊 How to Check Credit Balance and Usage
 
@@ -67,12 +70,13 @@ You can check in the following locations:
 | Model | Credit Consumption |
 |-------|-------------------|
 | Claude Sonnet 4 | ~11 credits |
-| Claude Opus 4 | ~53 credits |
 | Claude Opus 4.1 | ~53 credits |
-| Claude 3.7 Sonnet (thinking) | ~11 credits |
+| Claude 4 Sonnet (thinking) | ~11 credits |
 | Kimi K2 | ~2 credits |
 | GPT-4o | ~7 credits |
 | GPT-4.1 | ~6 credits |
+| GPT-5 | ~7 credits |
+| GPT-o3 | ~6 credits |
 | GPT-OSS-120B | ~1 credit |
 | Gemini 2.5 Pro | ~7 credits |
 | Gemini 2.5 Flash | ~2 credits |
