feat: 优化语音速度

This commit is contained in:
liqupan
2025-12-06 22:42:05 +08:00
parent 7fe4b05cf8
commit 309b1318a7
7 changed files with 1523 additions and 6 deletions

View File

@@ -71,6 +71,14 @@
</view>
</view>
<!-- 🧪 测试按钮仅测试模式显示 -->
<view v-if="isTestMode" class="test-controls">
<button class="test-btn" @click="sendTestAudio" :disabled="voiceState === 'thinking' || voiceState === 'speaking'">
{{ testButtonText }}
</button>
<text class="test-hint">测试模式点击发送假数据</text>
</view>
</view>
</view>
@@ -261,6 +269,12 @@ import {
getUnreadMessages,
clearUnreadMessages
} from '@/utils/unreadMessages.js';
// 🎙️ 新增导入WebSocket语音流模块
import VoiceStreamWebSocket from '@/utils/voiceStreamWebSocket.js';
import { getWsUrl, API_CONFIG } from '@/utils/config.js';
// 🧪 测试模式配置
const isTestMode = ref(API_CONFIG.TEST_MODE.enabled);
// 获取组件实例
const instance = getCurrentInstance();
@@ -371,6 +385,15 @@ const vadConfig = ref({
silenceDuration: 1500 // 静音判定时间 (ms)
});
// 🎙️ WebSocket voice-stream state
const voiceStreamWs = ref(null); // VoiceStreamWebSocket instance (null until voice mode connects)
const isWebSocketMode = ref(true); // use WebSocket streaming mode; set false to fall back to the HTTP voice-chat API
const audioPlayQueue = ref([]); // queue of ArrayBuffer audio chunks awaiting playback
const isPlayingAudio = ref(false); // true while the playback loop is draining the queue
const currentSentence = ref(''); // sentence currently being displayed
const currentChatSessionId = ref(null); // chat session id, used for history records
const currentVoiceTemplateId = ref(null); // template id used by voice mode
// AI配置
const availableTemplates = ref([]);
const currentModelId = ref(null);
@@ -397,6 +420,17 @@ const voiceStateText = computed(() => {
return stateMap[voiceState.value] || '准备就绪';
});
// 🧪 Label for the test button, derived from the current voice state.
const testButtonText = computed(() => {
  switch (voiceState.value) {
    case 'listening':
      return '⏺️ 录音中...';
    case 'thinking':
      return '⏳ 处理中...';
    case 'speaking':
      return '🔊 回复中...';
    case 'idle':
    default:
      // Unknown or idle states fall back to the default prompt.
      return '🧪 发送测试音频';
  }
});
// 根据音量动态生成声纹高度
const voiceWaveStyles = computed(() => {
const barCount = 40;
@@ -602,6 +636,21 @@ onBeforeUnmount(() => {
voiceState.value = 'idle';
currentVolume.value = 0;
// 6. 关闭WebSocket连接
if (voiceStreamWs.value) {
try {
voiceStreamWs.value.close();
voiceStreamWs.value = null;
console.log('🔌 已关闭WebSocket连接');
} catch (error) {
console.warn('关闭WebSocket失败:', error);
}
}
// 7. 清空音频播放队列
audioPlayQueue.value = [];
isPlayingAudio.value = false;
console.log('✅ 资源清理完成');
});
@@ -1490,7 +1539,7 @@ const clearContext = async () => {
};
// 🎧 切换语音模式
const toggleVoiceMode = () => {
const toggleVoiceMode = async () => {
isVoiceMode.value = !isVoiceMode.value;
if (isVoiceMode.value) {
@@ -1498,14 +1547,116 @@ const toggleVoiceMode = () => {
voiceState.value = 'idle';
isVoiceModeInterrupted.value = false; // 重置中断标志
// 🎙️ 建立WebSocket连接如果启用WebSocket模式
if (isWebSocketMode.value) {
try {
const wsUrl = getWsUrl(API_CONFIG.WS_ENDPOINTS.VOICE_STREAM);
voiceStreamWs.value = new VoiceStreamWebSocket(wsUrl);
// 设置事件监听器
voiceStreamWs.value.on('connected', () => {
console.log('[VoiceMode] WebSocket已连接');
});
voiceStreamWs.value.on('sttResult', (text) => {
console.log('[VoiceMode] STT结果:', text);
addMessage('user', text);
currentSentence.value = '';
});
voiceStreamWs.value.on('llmToken', (token) => {
// 可选显示LLM输出
console.log('[VoiceMode] LLM Token:', token);
});
voiceStreamWs.value.on('sentence', (sentence) => {
console.log('[VoiceMode] 完整句子:', sentence);
currentSentence.value = sentence;
addMessage('ai', sentence);
});
voiceStreamWs.value.on('audioChunk', (audioData) => {
// 将音频数据加入播放队列
audioPlayQueue.value.push(audioData);
processAudioQueue();
});
voiceStreamWs.value.on('complete', () => {
console.log('[VoiceMode] 对话完成,等待播放队列清空');
// 等待播放队列清空后再设置idle
const checkQueueEmpty = () => {
if (audioPlayQueue.value.length === 0 && !isPlayingAudio.value) {
console.log('[VoiceMode] 播放队列已清空切换到idle状态');
voiceState.value = 'idle';
// 如果是自动模式,重新开始录音
if (isAutoVoiceMode.value && isVoiceMode.value) {
console.log('↺ 自动模式:重新开始监听');
startVoiceRecording();
}
} else {
console.log('[VoiceMode] 队列还有数据或正在播放100ms后再检查');
setTimeout(checkQueueEmpty, 100);
}
};
checkQueueEmpty();
});
voiceStreamWs.value.on('error', (error) => {
console.error('[VoiceMode] 错误:', error);
uni.showToast({
title: '语音处理失败: ' + error,
icon: 'none',
duration: 2000
});
voiceState.value = 'idle';
});
voiceStreamWs.value.on('disconnected', () => {
console.log('[VoiceMode] WebSocket已断开');
});
// 复用文字对话的 conversationId保持历史记录一致
// 如果还没有 conversationId会在initializeConversation中生成
if (!conversationId.value) {
await initializeConversation();
}
// 使用当前角色的templateId如果有的话
currentVoiceTemplateId.value = currentCharacter.value.templateId || currentCharacter.value.roleId || null;
console.log('[VoiceMode] 连接参数 - SessionId:', conversationId.value, 'TemplateId:', currentVoiceTemplateId.value);
// 连接WebSocket传递与文字对话相同的sessionId和templateId
await voiceStreamWs.value.connect(
conversationId.value,
currentVoiceTemplateId.value,
userStore.token,
userStore.userId
);
} catch (error) {
console.error('[VoiceMode] WebSocket连接失败:', error);
uni.showToast({
title: 'WebSocket连接失败',
icon: 'none',
duration: 2000
});
isVoiceMode.value = false;
return;
}
}
uni.showToast({
title: '已切换到语音模式',
title: '已切换到语音模式' + (isWebSocketMode.value ? '(实时流式)' : '') + (isTestMode.value ? '(测试)' : ''),
icon: 'none',
duration: 1500
});
// 如果是自动模式,直接开始监听
if (isAutoVoiceMode.value) {
// 如果是自动模式且非测试模式,直接开始监听
if (isAutoVoiceMode.value && !isTestMode.value) {
startVoiceRecording();
}
} else {
@@ -1548,6 +1699,22 @@ const toggleVoiceMode = () => {
voiceState.value = 'idle';
currentVolume.value = 0;
// 6. 关闭WebSocket连接
if (voiceStreamWs.value) {
try {
voiceStreamWs.value.close();
voiceStreamWs.value = null;
console.log('🔌 已关闭WebSocket连接');
} catch (error) {
console.warn('关闭WebSocket失败:', error);
}
}
// 7. 清空音频播放队列
audioPlayQueue.value = [];
isPlayingAudio.value = false;
currentSentence.value = '';
uni.showToast({
title: '已切换到文本模式',
icon: 'none',
@@ -1763,7 +1930,38 @@ const handleVoiceModeMessage = async (filePath) => {
try {
console.log('🎧 语音模式:开始处理录音文件', filePath);
// 调用后端voice-chat接口
// 🎙️ WebSocket模式通过WebSocket发送音频
if (isWebSocketMode.value && voiceStreamWs.value && voiceStreamWs.value.isConnected) {
// 读取音频文件并通过WebSocket发送
const fs = uni.getFileSystemManager();
console.log(wx.env.USER_DATA_PATH)
fs.readFile({
filePath: filePath,
success: (res) => {
console.log('[VoiceMode] 读取音频文件成功,大小:', res.data.byteLength);
// 发送音频数据
const success = voiceStreamWs.value.sendAudio(res.data);
if (!success) {
throw new Error('发送音频数据失败');
}
console.log('[VoiceMode] 音频已通过WebSocket发送');
},
fail: (err) => {
console.error('[VoiceMode] 读取音频文件失败:', err);
uni.showToast({
title: '读取音频文件失败',
icon: 'none',
duration: 2000
});
voiceState.value = 'idle';
}
});
return;
}
// HTTP模式调用后端voice-chat接口
const result = await voiceAPI.voiceChat(filePath, {
sessionId: conversationId.value, // 使用当前会话ID保持上下文
modelId: currentCharacter.value.modelId || 10,
@@ -1851,6 +2049,247 @@ const handleVoiceModeMessage = async (filePath) => {
}
};
// 🧪 Load test PCM audio data for test mode.
// Source priority: inline base64 config first, then a configured file path.
// Resolves with an ArrayBuffer of raw PCM (expected: 16 kHz, 16-bit, mono, LE).
const generateTestPCMAudio = () => {
  return new Promise((resolve, reject) => {
    const config = API_CONFIG.TEST_MODE;
    // Option 1: inline base64 data takes priority.
    if (config.testAudioBase64 && config.testAudioBase64.trim() !== '') {
      try {
        console.log('[TestMode] 从base64加载测试音频数据');
        const arrayBuffer = uni.base64ToArrayBuffer(config.testAudioBase64);
        console.log('[TestMode] 测试音频数据已加载:', {
          dataSize: arrayBuffer.byteLength + ' bytes',
          expectedFormat: '16000Hz, 16bit, 单声道, Little Endian'
        });
        resolve(arrayBuffer);
      } catch (error) {
        console.error('[TestMode] base64解码失败:', error);
        reject(new Error('base64数据解码失败'));
      }
      return;
    }
    // Option 2: read from the configured file path.
    if (config.testAudioPath && config.testAudioPath.trim() !== '') {
      // #ifdef MP-WEIXIN
      const fs = uni.getFileSystemManager();
      // FIX: previously this ignored config.testAudioPath and always read
      // wx.env.USER_DATA_PATH + '/output.pcm', while the success log claimed
      // the configured path had been used. Read the configured path instead.
      const testAudioPath = config.testAudioPath;
      fs.readFile({
        filePath: testAudioPath,
        success: (res) => {
          console.log('[TestMode] 从文件加载测试音频数据:', config.testAudioPath);
          console.log('[TestMode] 文件大小:', res.data.byteLength + ' bytes');
          resolve(res.data);
        },
        fail: (err) => {
          console.error('[TestMode] 读取音频文件失败:', err);
          reject(new Error('读取音频文件失败: ' + JSON.stringify(err)));
        }
      });
      // #endif
      // #ifndef MP-WEIXIN
      reject(new Error('文件读取仅支持微信小程序'));
      // #endif
      return;
    }
    // Neither source is configured: fail with a setup hint.
    reject(new Error('请在config.js中配置 testAudioBase64 或 testAudioPath'));
  });
};
// 🧪 Send pre-canned test audio instead of a live recording (test mode only).
// WebSocket mode pushes the raw ArrayBuffer over the socket; HTTP mode writes
// it to a temp file and reuses the normal voice-message pipeline.
const sendTestAudio = async () => {
  // Guard: test audio only makes sense inside voice mode.
  if (!isVoiceMode.value) {
    uni.showToast({
      title: '请先进入语音模式',
      icon: 'none',
      duration: 2000
    });
    return;
  }
  // Guard: don't stack requests while a previous turn is still in flight.
  if (voiceState.value === 'thinking' || voiceState.value === 'speaking') {
    uni.showToast({
      title: '正在处理中,请稍候',
      icon: 'none',
      duration: 1500
    });
    return;
  }
  try {
    console.log('[TestMode] 开始发送测试音频数据');
    voiceState.value = 'listening';
    // Load the test PCM data (base64 or file, per config).
    const testAudioData = await generateTestPCMAudio();
    // Simulate VAD: pause briefly as if the user were speaking.
    await new Promise(resolve => setTimeout(resolve, 1000));
    // Move to the thinking state before dispatching.
    voiceState.value = 'thinking';
    // WebSocket mode: send the ArrayBuffer directly over the socket.
    if (isWebSocketMode.value && voiceStreamWs.value && voiceStreamWs.value.isConnected) {
      const success = voiceStreamWs.value.sendAudio(testAudioData);
      if (!success) {
        throw new Error('发送音频数据失败');
      }
      console.log('[TestMode] 测试音频已通过WebSocket发送');
    } else {
      // HTTP mode: persist to a temp file, then call the existing handler.
      // #ifdef MP-WEIXIN
      const fs = uni.getFileSystemManager();
      const filePath = `${wx.env.USER_DATA_PATH}/test_audio_${Date.now()}.pcm`;
      // FileSystemManager.writeFile needs base64 for binary payloads.
      const base64 = uni.arrayBufferToBase64(testAudioData);
      await new Promise((resolve, reject) => {
        fs.writeFile({
          filePath: filePath,
          data: base64,
          encoding: 'base64',
          success: () => {
            console.log('[TestMode] 测试音频文件已保存:', filePath);
            // Hand off to the existing voice-message pipeline.
            // NOTE(review): intentionally not awaited — errors surface
            // through that handler's own state transitions.
            handleVoiceModeMessage(filePath);
            resolve();
          },
          fail: (err) => {
            console.error('[TestMode] 保存测试音频文件失败:', err);
            reject(err);
          }
        });
      });
      // #endif
      // #ifndef MP-WEIXIN
      uni.showToast({
        title: 'HTTP模式仅支持微信小程序',
        icon: 'none',
        duration: 2000
      });
      voiceState.value = 'idle';
      // #endif
    }
  } catch (error) {
    console.error('[TestMode] 发送测试音频失败:', error);
    uni.showToast({
      title: '发送测试音频失败',
      icon: 'none',
      duration: 2000
    });
    voiceState.value = 'idle';
  }
};
// 🎙️ Drain the audio playback queue (WebSocket streaming audio).
// Each queued ArrayBuffer chunk is written to a temp MP3 file and played
// through an InnerAudioContext, strictly one chunk at a time.
const processAudioQueue = async () => {
  // Re-entrancy guard: only one drain loop runs at a time, and there is
  // nothing to do while the queue is empty.
  if (isPlayingAudio.value || audioPlayQueue.value.length === 0) {
    return;
  }
  isPlayingAudio.value = true;
  voiceState.value = 'speaking';
  // #ifdef MP-WEIXIN
  try {
    // Play chunks sequentially; stop early if voice mode is exited.
    while (audioPlayQueue.value.length > 0 && isVoiceMode.value) {
      const audioData = audioPlayQueue.value.shift();
      // Persist the chunk as a temp MP3 file.
      // NOTE(review): temp files are never deleted — consider cleanup.
      const fs = uni.getFileSystemManager();
      const filePath = `${wx.env.USER_DATA_PATH}/stream_audio_${Date.now()}.mp3`;
      await new Promise((resolve, reject) => {
        // FileSystemManager.writeFile requires base64 for binary data.
        const base64 = uni.arrayBufferToBase64(audioData);
        fs.writeFile({
          filePath: filePath,
          data: base64,
          encoding: 'base64',
          success: () => {
            console.log('[AudioQueue] MP3文件写入成功:', filePath);
            // Tear down any previous audio context before playing.
            if (audioContext.value) {
              audioContext.value.destroy();
            }
            // Route to the speaker and ignore the hardware mute switch.
            uni.setInnerAudioOption({
              obeyMuteSwitch: false,
              speakerOn: true
            });
            audioContext.value = uni.createInnerAudioContext();
            audioContext.value.autoplay = false;
            audioContext.value.obeyMuteSwitch = false;
            audioContext.value.volume = 1;
            audioContext.value.loop = false;
            audioContext.value.src = filePath;
            audioContext.value.onCanplay(() => {
              console.log('[AudioQueue] 音频可以播放,开始播放');
              // Only start if the eager play() below hasn't begun yet.
              if (audioContext.value?.paused) {
                audioContext.value.play();
              }
            });
            audioContext.value.onPlay(() => {
              console.log('[AudioQueue] 开始播放音频块');
              startVolumeSimulation();
            });
            audioContext.value.onEnded(() => {
              console.log('[AudioQueue] 音频块播放完成');
              stopVolumeSimulation();
              audioContext.value?.destroy();
              audioContext.value = null;
              // Chunk finished — unblock the loop for the next one.
              resolve();
            });
            audioContext.value.onError((err) => {
              console.error('[AudioQueue] 音频播放错误:', err);
              stopVolumeSimulation();
              audioContext.value?.destroy();
              audioContext.value = null;
              resolve(); // resolve (not reject): skip to the next chunk
            });
            // Eagerly start playback (onCanplay doesn't always fire).
            audioContext.value.play();
          },
          fail: (err) => {
            console.error('[AudioQueue] 写入文件失败:', err);
            reject(err);
          }
        });
      });
    }
  } catch (error) {
    console.error('[AudioQueue] 播放队列处理失败:', error);
  } finally {
    isPlayingAudio.value = false;
    // If more chunks arrived while playing, start another drain pass.
    if (audioPlayQueue.value.length > 0 && isVoiceMode.value) {
      processAudioQueue();
    }
    // Do NOT set 'idle' here when the queue is empty: the 'complete' event
    // owns that transition — an empty queue may only mean the backend is
    // still synthesizing the next sentence.
  }
  // #endif
};
// 🎧 播放Base64编码的音频
const playVoiceFromBase64 = async (audioBase64, text = '') => {
return new Promise((resolve, reject) => {
@@ -2823,4 +3262,42 @@ page {
background: linear-gradient(180deg, #f9e076 0%, #f5d042 100%);
}
/* 🧪 测试模式样式 */
.test-controls {
margin-top: 80rpx;
display: flex;
flex-direction: column;
align-items: center;
gap: 20rpx;
}
.test-btn {
padding: 24rpx 60rpx;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
color: white;
border-radius: 50rpx;
font-size: 32rpx;
font-weight: 500;
border: none;
box-shadow: 0 8rpx 24rpx rgba(102, 126, 234, 0.4);
transition: all 0.3s ease;
}
.test-btn:active {
transform: scale(0.95);
box-shadow: 0 4rpx 12rpx rgba(102, 126, 234, 0.3);
}
.test-btn[disabled] {
background: linear-gradient(135deg, #bbb 0%, #999 100%);
box-shadow: 0 4rpx 12rpx rgba(0, 0, 0, 0.1);
opacity: 0.6;
}
.test-hint {
font-size: 24rpx;
color: rgba(255, 255, 255, 0.6);
text-align: center;
}
</style>

BIN
src/static/output.pcm Normal file

Binary file not shown.

View File

@@ -1,11 +1,32 @@
// API配置统一管理
export const API_CONFIG = {
// 基础API地址
BASE_URL: 'https://api.aixsy.com.cn',
BASE_URL: 'http://192.168.3.13:8091',
// WebSocket地址
WS_BASE_URL: 'ws://192.168.3.13:8091',
// 其他服务地址(如果需要)
WEB_URL: 'https://www.aixsy.com.cn',
// 🧪 测试模式配置
TEST_MODE: {
// 是否启用测试模式true: 显示测试按钮,禁用录音; false: 正常录音模式)
enabled: false,
// 📝 测试音频数据base64编码的PCM数据
// 格式要求:
// - 采样率: 16000 Hz
// - 位深度: 16 bit (有符号整数)
// - 声道数: 1 (单声道)
// - 字节序: Little Endian (小端序)
// - 无文件头纯PCM数据
testAudioBase64: '', // 👈 在这里填入你的base64编码的PCM数据
// 或者使用文件路径优先使用base64
testAudioPath: 'src/static/output.pcm', // 例如: '/static/test_audio.pcm'
},
// API端点
ENDPOINTS: {
// 登录相关
@@ -44,6 +65,11 @@ export const API_CONFIG = {
CONFIG_TTS: '/app/config/tts'
},
// WebSocket端点
WS_ENDPOINTS: {
VOICE_STREAM: '/ws/voice-stream'
},
// 请求超时时间(毫秒)
TIMEOUT: 30000,
@@ -61,3 +87,8 @@ export const getWebUrl = (endpoint) => {
return API_CONFIG.WEB_URL + endpoint;
};
// Build a full WebSocket URL by joining the configured base with an endpoint path.
export const getWsUrl = (endpoint) => `${API_CONFIG.WS_BASE_URL}${endpoint}`;

View File

@@ -0,0 +1,310 @@
/**
 * 语音流式对话 WebSocket 管理模块
 *
 * Wraps a uni-app SocketTask for streaming voice chat: sends raw audio
 * upstream as binary frames and dispatches server events (STT results,
 * LLM tokens, complete sentences, audio chunks, completion, errors) to
 * callbacks registered via on(event, callback).
 */
class VoiceStreamWebSocket {
  /**
   * @param {String} url - base WebSocket URL (query params appended by connect())
   */
  constructor(url) {
    this.url = url
    this.ws = null
    this.isConnected = false
    this.reconnectAttempts = 0
    this.maxReconnectAttempts = 5
    this.reconnectDelay = 2000 // ms between reconnect attempts
    this.manualClose = false // true when close() was called deliberately
    // Arguments of the last connect() call, replayed on auto-reconnect so a
    // reconnected socket keeps the same session/template/credentials.
    this.lastConnectArgs = []
    // Event listeners set via on(); keys are 'on' + capitalized event name.
    this.listeners = {
      onConnected: null,
      onDisconnected: null,
      onSttResult: null,
      onLlmToken: null,
      onSentence: null,
      onAudioChunk: null,
      onComplete: null,
      onError: null
    }
  }
  /**
   * 建立连接
   * @param {String} sessionId - chat session id for history lookup/save (kept
   *   consistent with the text-chat sessionId)
   * @param {Number} templateId - template id
   * @param {String} token - auth token (optional)
   * @param {String} userId - user id (optional)
   * @returns {Promise<void>} resolves once the socket reports open
   */
  connect(sessionId = null, templateId = null, token = null, userId = null) {
    return new Promise((resolve, reject) => {
      try {
        // Reset the manual-close flag for this (re)connection attempt.
        this.manualClose = false
        // FIX: remember the arguments so tryReconnect() can reconnect with
        // the same parameters (previously reconnects dropped them all).
        this.lastConnectArgs = [sessionId, templateId, token, userId]
        // Build the connection URL. Values are URI-encoded so tokens or ids
        // containing reserved characters can't corrupt the query string.
        let wsUrl = this.url
        const params = []
        if (sessionId) params.push(`sessionId=${encodeURIComponent(sessionId)}`)
        if (templateId) params.push(`templateId=${encodeURIComponent(templateId)}`)
        if (token) params.push(`token=${encodeURIComponent(token)}`)
        if (userId) params.push(`userId=${encodeURIComponent(userId)}`)
        if (params.length > 0) {
          wsUrl += '?' + params.join('&')
        }
        console.log('[VoiceStreamWS] 正在连接:', wsUrl)
        // Create the WebSocket (SocketTask in uni-app).
        this.ws = uni.connectSocket({
          url: wsUrl,
          success: () => {
            console.log('[VoiceStreamWS] WebSocket创建成功')
          },
          fail: (err) => {
            console.error('[VoiceStreamWS] WebSocket创建失败:', err)
            reject(err)
          }
        })
        // Socket opened: mark connected and reset the reconnect counter.
        this.ws.onOpen(() => {
          console.log('[VoiceStreamWS] 连接已建立')
          this.isConnected = true
          this.reconnectAttempts = 0
          if (this.listeners.onConnected) {
            this.listeners.onConnected()
          }
          resolve()
        })
        // Incoming messages (text JSON or binary audio).
        this.ws.onMessage((res) => {
          this.handleMessage(res)
        })
        // Transport error: surface to the error listener.
        this.ws.onError((err) => {
          console.error('[VoiceStreamWS] 连接错误:', err)
          this.isConnected = false
          if (this.listeners.onError) {
            this.listeners.onError('WebSocket连接错误')
          }
        })
        // Socket closed: notify, then auto-reconnect unless closed on purpose.
        this.ws.onClose(() => {
          console.log('[VoiceStreamWS] 连接已关闭')
          this.isConnected = false
          if (this.listeners.onDisconnected) {
            this.listeners.onDisconnected()
          }
          if (!this.manualClose) {
            console.log('[VoiceStreamWS] 检测到异常断开,将尝试自动重连')
            this.tryReconnect()
          } else {
            console.log('[VoiceStreamWS] 主动关闭连接,不进行重连')
          }
        })
      } catch (err) {
        console.error('[VoiceStreamWS] 连接异常:', err)
        reject(err)
      }
    })
  }
  /**
   * 处理接收到的消息
   * Binary frames are treated as audio chunks; text frames are JSON commands
   * dispatched by their `type` field.
   */
  handleMessage(res) {
    try {
      // Binary message: streamed audio data.
      if (res.data instanceof ArrayBuffer) {
        console.log('[VoiceStreamWS] 收到音频数据:', res.data.byteLength, 'bytes')
        if (this.listeners.onAudioChunk) {
          this.listeners.onAudioChunk(res.data)
        }
        return
      }
      // Text message: JSON envelope { type, message?, ... }.
      const message = JSON.parse(res.data)
      console.log('[VoiceStreamWS] 收到消息:', message.type)
      switch (message.type) {
        case 'connected':
          // Server-side handshake acknowledgement.
          console.log('[VoiceStreamWS] 服务器确认连接')
          break
        case 'stt_result':
          // Speech-to-text recognition result.
          if (this.listeners.onSttResult) {
            this.listeners.onSttResult(message.message)
          }
          break
        case 'llm_token':
          // Single LLM output token.
          if (this.listeners.onLlmToken) {
            this.listeners.onLlmToken(message.message)
          }
          break
        case 'sentence':
          // A complete sentence of the reply.
          if (this.listeners.onSentence) {
            this.listeners.onSentence(message.message)
          }
          break
        case 'complete':
          // The whole conversational turn finished.
          if (this.listeners.onComplete) {
            this.listeners.onComplete()
          }
          break
        case 'error':
          // Server-reported error.
          console.error('[VoiceStreamWS] 服务器错误:', message.message)
          if (this.listeners.onError) {
            this.listeners.onError(message.message)
          }
          break
        case 'pong':
          // Heartbeat response to ping().
          console.log('[VoiceStreamWS] 心跳响应')
          break
        default:
          console.warn('[VoiceStreamWS] 未知消息类型:', message.type)
      }
    } catch (err) {
      console.error('[VoiceStreamWS] 处理消息失败:', err)
    }
  }
  /**
   * 发送音频数据
   * @param {ArrayBuffer} audioData - raw audio bytes to send as a binary frame
   * @returns {Boolean} false if not connected or the send threw synchronously
   */
  sendAudio(audioData) {
    if (!this.isConnected || !this.ws) {
      console.error('[VoiceStreamWS] 未连接,无法发送音频')
      return false
    }
    try {
      this.ws.send({
        data: audioData,
        success: () => {
          console.log('[VoiceStreamWS] 音频数据发送成功:', audioData.byteLength, 'bytes')
        },
        fail: (err) => {
          console.error('[VoiceStreamWS] 音频数据发送失败:', err)
          if (this.listeners.onError) {
            this.listeners.onError('发送音频失败')
          }
        }
      })
      return true
    } catch (err) {
      console.error('[VoiceStreamWS] 发送音频异常:', err)
      return false
    }
  }
  /**
   * 发送文本消息
   * @param {String} type - message type understood by the server
   * @param {*} data - optional payload attached as `data`
   * @returns {Boolean} false if not connected or the send threw synchronously
   */
  sendMessage(type, data = null) {
    if (!this.isConnected || !this.ws) {
      console.error('[VoiceStreamWS] 未连接,无法发送消息')
      return false
    }
    try {
      const message = {
        type,
        timestamp: Date.now()
      }
      // FIX: use a null/undefined check so falsy-but-valid payloads
      // (0, '', false) are not silently dropped.
      if (data != null) {
        message.data = data
      }
      this.ws.send({
        data: JSON.stringify(message),
        success: () => {
          console.log('[VoiceStreamWS] 消息发送成功:', type)
        },
        fail: (err) => {
          console.error('[VoiceStreamWS] 消息发送失败:', err)
        }
      })
      return true
    } catch (err) {
      console.error('[VoiceStreamWS] 发送消息异常:', err)
      return false
    }
  }
  /**
   * 取消当前对话(打断)
   */
  cancel() {
    return this.sendMessage('cancel')
  }
  /**
   * 发送心跳
   */
  ping() {
    return this.sendMessage('ping')
  }
  /**
   * 尝试重连
   * Backs off by a fixed delay; gives up after maxReconnectAttempts.
   */
  tryReconnect() {
    if (this.reconnectAttempts >= this.maxReconnectAttempts) {
      console.log('[VoiceStreamWS] 达到最大重连次数,停止重连')
      return
    }
    this.reconnectAttempts++
    console.log(`[VoiceStreamWS] 尝试重连 (${this.reconnectAttempts}/${this.maxReconnectAttempts})`)
    setTimeout(() => {
      if (!this.isConnected) {
        // FIX: replay the original connect arguments so the reconnected
        // socket keeps the same sessionId/templateId/token/userId
        // (previously this called connect() with no arguments).
        this.connect(...this.lastConnectArgs).catch(err => {
          console.error('[VoiceStreamWS] 重连失败:', err)
        })
      }
    }, this.reconnectDelay)
  }
  /**
   * 关闭连接
   * Marks the close as deliberate so onClose does not trigger a reconnect.
   */
  close() {
    if (this.ws) {
      console.log('[VoiceStreamWS] 主动关闭连接')
      this.manualClose = true
      this.isConnected = false
      this.reconnectAttempts = 0
      this.ws.close()
      this.ws = null
    }
  }
  /**
   * 设置事件监听器
   * @param {String} event - event name, e.g. 'sttResult', 'audioChunk'
   * @param {Function} callback - handler; silently ignored for unknown events
   */
  on(event, callback) {
    const key = `on${event.charAt(0).toUpperCase()}${event.slice(1)}`
    if (Object.prototype.hasOwnProperty.call(this.listeners, key)) {
      this.listeners[key] = callback
    }
  }
}
export default VoiceStreamWebSocket