feat: vision model (#489)

* mongo init

* perf: mongo connect

* perf: tts

perf: whisper and tts

perf: tts whisper permission

log

rebase (#488)

* perf: modal

* i18n

* perf: schema lean

* feat: vision model format

* perf: tts loading

* perf: static data

* perf: tts

* feat: image

* perf: image

* perf: upload image and title

* perf: image size

* doc

* perf: color

* doc

* speaking cannot select file

* doc
This commit is contained in:
Archer
2023-11-18 15:42:35 +08:00
committed by GitHub
parent 70f3373246
commit c5664c7e90
58 changed files with 650 additions and 254 deletions

View File

@@ -1,4 +1,4 @@
import { useState, useCallback, useEffect, useMemo } from 'react';
import { useState, useCallback, useEffect, useMemo, useRef } from 'react';
import { useToast } from '@/web/common/hooks/useToast';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { AppTTSConfigType } from '@/types/app';
@@ -14,6 +14,7 @@ export const useAudioPlay = (props?: { ttsConfig?: AppTTSConfigType }) => {
const [audio, setAudio] = useState<HTMLAudioElement>();
const [audioLoading, setAudioLoading] = useState(false);
const [audioPlaying, setAudioPlaying] = useState(false);
const audioController = useRef(new AbortController());
// Check whether the voice is supported
const hasAudio = useMemo(() => {
@@ -49,12 +50,15 @@ export const useAudioPlay = (props?: { ttsConfig?: AppTTSConfigType }) => {
return resolve({ buffer });
}
audioController.current = new AbortController();
/* request tts */
const response = await fetch('/api/core/chat/item/getSpeech', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
signal: audioController.current.signal,
body: JSON.stringify({
chatItemId,
ttsConfig,
@@ -120,6 +124,7 @@ export const useAudioPlay = (props?: { ttsConfig?: AppTTSConfigType }) => {
audio.src = '';
}
window.speechSynthesis?.cancel();
audioController.current?.abort();
setAudioPlaying(false);
}, [audio]);