Compare commits

...

2 Commits

Author    SHA1        Message              Date
liqupan   7fe4b05cf8  feat: 1130           2025-11-30 21:25:23 +08:00
liqupan   792fa980f9  feat: support voice  2025-11-29 00:18:09 +08:00
5 changed files with 1058 additions and 65 deletions

.gitignore (vendored) · 3 changed lines

@@ -72,4 +72,5 @@ coverage/
 # UniApp specific
 unpackage/
 dist/
-.history/
+.history/
+purple-energy-visualizer/

package.json

@@ -54,6 +54,7 @@
     "@dcloudio/uni-quickapp-webview": "3.0.0-4060420250429001",
     "ant-design-vue": "^4.2.6",
     "pinia": "^2.1.7",
+    "three": "^0.181.2",
     "vue": "^3.4.21",
     "vue-i18n": "^9.1.9"
   },
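The only change in this file is the new three dependency, presumably backing the purple-energy-visualizer directory that the .gitignore change above now excludes. A minimal sketch of the kind of scene setup three enables (illustrative only; none of the names below come from this repository):

    import * as THREE from 'three';

    // Smoke test that the dependency resolves: render one purple cube.
    const scene = new THREE.Scene();
    const camera = new THREE.PerspectiveCamera(60, 1, 0.1, 100);
    camera.position.z = 3;
    const renderer = new THREE.WebGLRenderer({ antialias: true });
    renderer.setSize(256, 256);
    const cube = new THREE.Mesh(
      new THREE.BoxGeometry(1, 1, 1),
      new THREE.MeshBasicMaterial({ color: 0x8a2be2 }) // blueviolet
    );
    scene.add(cube);
    renderer.render(scene, camera);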

File diff suppressed because it is too large


@@ -565,6 +565,29 @@ export const voiceAPI = {
       authHeader = loginStatus.token.startsWith('Bearer ') ? loginStatus.token : 'Bearer ' + loginStatus.token;
     }
+    // Build the formData, with support for more parameters
+    const formData = {
+      modelId: options.modelId || null,
+      templateId: options.templateId || null,
+      voiceStyle: options.voiceStyle || 'default'
+    };
+    // Add optional parameters
+    if (options.sessionId) {
+      formData.sessionId = options.sessionId;
+    }
+    if (options.ttsConfigId) {
+      formData.ttsConfigId = options.ttsConfigId;
+    }
+    if (options.sttConfigId) {
+      formData.sttConfigId = options.sttConfigId;
+    }
+    if (options.useFunctionCall !== undefined) {
+      formData.useFunctionCall = options.useFunctionCall;
+    }
+    console.log('Voice chat parameters:', formData);
     return new Promise((resolve) => {
       uni.uploadFile({
         url: getApiUrl(API_CONFIG.ENDPOINTS.VOICE_CHAT),
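This hunk builds the multipart form fields up front, so optional IDs are only sent when present, while useFunctionCall is sent whenever it is defined, even as false. A hedged usage sketch; the method name voiceChat and its (filePath, options) signature are assumptions, since the hunk header only shows the enclosing voiceAPI object, and only the option keys come from the diff:

    // Hypothetical call site for the voiceAPI method shown above.
    const result = await voiceAPI.voiceChat(tempFilePath, {
      modelId: 3,                  // optional: which LLM to use
      sessionId: currentSessionId, // only attached when truthy
      ttsConfigId: 1,              // optional TTS voice configuration
      sttConfigId: 2,              // optional STT configuration
      useFunctionCall: false       // still sent: the code checks !== undefined
    });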
@@ -573,11 +596,7 @@ export const voiceAPI = {
         header: authHeader ? {
           'Authorization': authHeader
         } : {},
-        formData: {
-          modelId: options.modelId || null,
-          templateId: options.templateId || null,
-          voiceStyle: options.voiceStyle || 'default'
-        },
+        formData: formData,
         success: (res) => {
           console.log('Voice chat upload succeeded:', res);
@@ -586,41 +605,46 @@ export const voiceAPI = {
             console.log('Voice chat response data:', data);
             if (data.code === 200) {
               // Extract fields according to the backend's actual response structure
+              // Return the full data object, including sttResult, llmResult and ttsResult
+              const responseData = data.data || {};
+              // Old-format compatibility: extract the key fields
               let aiResponse = null;
               let userText = null;
               let audioUrl = null;
+              let audioBase64 = null;
               // Extract the AI reply from data.llmResult.response
-              if (data.data && data.data.llmResult && data.data.llmResult.response) {
-                aiResponse = data.data.llmResult.response;
+              if (responseData.llmResult && responseData.llmResult.response) {
+                aiResponse = responseData.llmResult.response;
               }
               // Extract the user's text (speech-to-text) from data.sttResult.text
-              if (data.data && data.data.sttResult && data.data.sttResult.text) {
-                userText = data.data.sttResult.text;
+              if (responseData.sttResult && responseData.sttResult.text) {
+                userText = responseData.sttResult.text;
               }
-              // Extract the audio path from data.ttsResult.audioPath
-              if (data.data && data.data.ttsResult && data.data.ttsResult.audioPath) {
-                audioUrl = data.data.ttsResult.audioPath;
+              // Extract the audio from data.ttsResult
+              if (responseData.ttsResult) {
+                audioUrl = responseData.ttsResult.audioPath;
+                audioBase64 = responseData.ttsResult.audioBase64;
               }
               // Fallback field extraction (kept for backward compatibility)
               if (!aiResponse) {
                 if (data.response && typeof data.response === 'string') {
                   aiResponse = data.response;
-                } else if (data.data && data.data.response) {
-                  aiResponse = data.data.response;
+                } else if (responseData.response) {
+                  aiResponse = responseData.response;
                 }
               }
               if (!userText) {
-                userText = data.userText || data.data?.userText || data.data?.text || data.data?.user_text || data.data?.recognizedText || data.data?.transcription;
+                userText = data.userText || responseData.userText || responseData.text || responseData.user_text || responseData.recognizedText || responseData.transcription;
               }
-              if (!audioUrl) {
-                audioUrl = data.audioPath || data.audioUrl || data.data?.audioUrl || data.data?.url || data.data?.audio_url || data.data?.speechUrl || data.data?.ttsUrl || data.data?.audioPath;
+              if (!audioUrl && !audioBase64) {
+                audioUrl = data.audioPath || data.audioUrl || responseData.audioUrl || responseData.url || responseData.audio_url || responseData.speechUrl || responseData.ttsUrl || responseData.audioPath;
               }
               // Clean up the AI reply text
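The success branch first reads the structured payload, then falls back to a long list of legacy field names. Reconstructed from those accesses, the envelope the backend is expected to return looks roughly like this (a sketch inferred from the parsing code, not a documented contract; all values are placeholders, and audioBase64 may be absent when audioPath is served instead):

    // Inferred response envelope (placeholders, not real data).
    const exampleResponse = {
      code: 200,
      data: {
        sttResult: { text: 'recognized user speech' },
        llmResult: { response: 'AI reply text' },
        ttsResult: {
          audioPath: '/static/tts/reply.mp3', // server-hosted audio, and/or:
          audioBase64: null                   // inline base64-encoded audio
        },
        sessionId: 'session-id-if-any',
        timestamp: 1764500000000
      }
    };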
@@ -631,9 +655,16 @@ export const voiceAPI = {
              resolve({
                success: true,
                data: {
+                 // Fields kept for compatibility with the old interface
                  userText: userText,
                  aiResponse: cleanedAiResponse,
-                 audioUrl: audioUrl
+                 audioUrl: audioUrl,
+                 // New: the complete data structure
+                 sttResult: responseData.sttResult || { text: userText },
+                 llmResult: responseData.llmResult || { response: cleanedAiResponse, inputText: userText },
+                 ttsResult: responseData.ttsResult || { audioPath: audioUrl, audioBase64: audioBase64 },
+                 sessionId: responseData.sessionId || null,
+                 timestamp: responseData.timestamp || Date.now()
                }
              });
            } else {
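With this shape, existing callers can keep reading the flat userText/aiResponse/audioUrl fields while new callers prefer the richer ttsResult. A hedged playback sketch; the voiceChat method name and its arguments are assumptions as before, uni.createInnerAudioContext is the standard uni-app audio API, and the base64 data-URL fallback works on H5 but may require writing a temp file on native platforms:

    const res = await voiceAPI.voiceChat(tempFilePath, { voiceStyle: 'default' });
    if (res.success) {
      const { audioPath, audioBase64 } = res.data.ttsResult;
      const player = uni.createInnerAudioContext();
      // Prefer the hosted file; fall back to inline base64 audio.
      player.src = audioPath || ('data:audio/mp3;base64,' + audioBase64);
      player.onError((err) => console.error('TTS playback failed:', err));
      player.play();
    }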