// index.js
const express = require("express");
const fileUpload = require("express-fileupload");
const app = express();
const { Configuration, OpenAIApi } = require("openai");
const fs = require("fs");
const { promisify } = require("util");
const path = require("path");
const tts = promisify(require("./utils/tts"));
require("dotenv").config({
path: path.resolve(__dirname, ".env.preview.local"),
});
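
// Credentials read from the environment: TTS_APP_ID, TTS_API_SECRET and
// TTS_API_KEY for the iFlytek TTS service, and OPENAI_API_KEY for OpenAI.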
const generateAudio = (text) => {
  return new Promise((resolve, reject) => {
    const auth = {
      app_id: process.env.TTS_APP_ID,
      app_skey: process.env.TTS_API_SECRET,
      app_akey: process.env.TTS_API_KEY,
    };
    // iFlytek TTS API parameters
    const business = {
      aue: "lame",
      sfl: 1,
      speed: 50,
      pitch: 50,
      volume: 100,
      bgs: 0,
    };
    const id = new Date().getTime();
    // Path where the generated audio file is written; it lives under the
    // statically served client/ directory so the frontend can fetch it directly
    const file = path.resolve(__dirname, `client/audio/${id}.m4a`);
    // Run the TTS request and resolve with the path relative to client/
    tts(auth, business, text, file)
      .then(() => resolve(`audio/${id}.m4a`))
      .catch(reject);
  });
};
const configuration = new Configuration({
  apiKey: process.env.OPENAI_API_KEY,
});
const openai = new OpenAIApi(configuration);
const handleIssueReply = async (prompt) => {
  const {
    data: { choices },
  } = await openai.createCompletion({
    model: "text-davinci-003",
    prompt,
    temperature: 0.5,
    max_tokens: 1000,
    top_p: 1.0,
    frequency_penalty: 0.0,
    presence_penalty: 0.0,
  });
  return choices[0].text?.trim();
};
const getAudioLength = (filePath) => {
  return new Promise((resolve, reject) => {
    fs.stat(path.resolve(__dirname, `client/${filePath}`), (err, stats) => {
      if (err) {
        return reject(err);
      }
      // Approximate audio length estimated from the file size
      resolve(parseInt(stats.size / 6600, 10));
    });
  });
};
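// Note: 6600 above is a rough size-to-length conversion factor for the generated
// audio file. A sketch of a more precise alternative, assuming the CommonJS-compatible
// music-metadata package were added as a dependency (absolutePath is a placeholder
// for the full path to the audio file):
//   const mm = require("music-metadata");
//   const { format } = await mm.parseFile(absolutePath);
//   const seconds = Math.round(format.duration);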
app.use(fileUpload());
app.post("/api/audio", async (req, res) => {
if (!req.files) return res.status(400).send({ message: "缺少参数", error: true });
const file = req.files.file;
// 存放用户上传的文件
const fileName = "audio.m4a";
file.mv(fileName, async (err) => {
if (err) {
return res.status(500).send(err);
}
const {
data: { text: prompt },
} = await openai.createTranscription(
fs.createReadStream(fileName),
"whisper-1"
);
console.log("解析的音频内容是>>>", prompt);
// 判断用户上传音频是否存在内容
if (!prompt.trim().length)
return res.send({ message: "未识别到语音内容", error: true });
const chatReply = await handleIssueReply(prompt);
console.log("生成的文本内容是>>>", chatReply);
const content = await generateAudio(chatReply);
console.log("生成的音频是>>>", content);
const length = await getAudioLength(content)
console.log("生成的音频长度是>>>", length);
res.send([
{ type: "system", content, chatReply, infoType: "audio", length, playStatus: false },
]);
});
});
app.get("/api/submit-issue", async (req, res) => {
const { issue } = req.query;
if (!issue.trim()) return res.status(400).send({ message: "缺少参数", error: true });
const chatReply = await handleIssueReply(issue);
return res.send([{ type: "system", content: chatReply.trim().replace('\n', '') }]);
});
app.use(express.static(path.join(__dirname, "client")));
app.listen(3000);
module.exports = app;
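
// Example client call (a sketch, assuming the server is running locally on port
// 3000 and a runtime with a global fetch implementation):
//   const res = await fetch("http://localhost:3000/api/submit-issue?issue=hello");
//   const [reply] = await res.json();
//   console.log(reply.content);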