4.7-production (#1053)

* 4.7-alpha3 (#62)

* doc

* Optimize possible null pointers and parts of UX

* fix: multi-index training error

* feat: doc and rename question guide

* fix ios speech input (#59)

* fix: prompt editor variables nowrap (#61)

* change openapi import in http module with curl import (#60)

* chore(ui): dataset import modal ui (#58)

* chore(ui): dataset import modal ui

* use component

* fix height

* 4.7 (#63)

* fix: claude3 image type verification failed (#1038) (#1040)

* perf: curl import modal

* doc img

* perf: adapt cohere rerank

* perf: code

* perf: input style

* doc

---------

Co-authored-by: xiaotian <dimsky@163.com>

* fix: ts

* docker deploy

* perf: prompt call

* doc

* ts

* finish ui

* perf: outlink detail ux

* perf: user schema

* fix: plugin update

* feat: get current time plugin

* fix: ts

* perf: fetch animation

* perf: mark ux

* doc

* perf: select app ux

* fix: split text custom string conflict

* perf: inform read

* doc

* memo flow component

* perf: version

* faq

* feat: flow max runtimes

* feat: similarity tip

* feat: auto detect file encoding

* Supports asymmetric vector model

* fix: ts

* perf: max w

* move code

* perf: hide whisper

* fix: ts

* feat: system msg modal

* perf: catch error

* perf: inform tip

* fix: inform

---------

Co-authored-by: heheer <71265218+newfish-cmyk@users.noreply.github.com>
Co-authored-by: xiaotian <dimsky@163.com>
This commit is contained in:
Archer
2024-03-26 12:09:31 +08:00
committed by GitHub
parent ef15ca894e
commit 911512b36d
180 changed files with 2179 additions and 1361 deletions

View File

@@ -8,7 +8,6 @@ import { OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat';
export const useSpeech = (props?: OutLinkChatAuthProps) => {
const { t } = useTranslation();
const mediaRecorder = useRef<MediaRecorder>();
// const mediaStream = useRef<MediaStream>();
const [mediaStream, setMediaStream] = useState<MediaStream>();
const { toast } = useToast();
const [isSpeaking, setIsSpeaking] = useState(false);
@@ -54,6 +53,7 @@ export const useSpeech = (props?: OutLinkChatAuthProps) => {
try {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
setMediaStream(stream);
mediaRecorder.current = new MediaRecorder(stream);
const chunks: Blob[] = [];
setIsSpeaking(true);
@@ -74,11 +74,18 @@ export const useSpeech = (props?: OutLinkChatAuthProps) => {
mediaRecorder.current.onstop = async () => {
const formData = new FormData();
const blob = new Blob(chunks, { type: 'audio/webm' });
let options = {};
if (MediaRecorder.isTypeSupported('audio/webm')) {
options = { type: 'audio/webm' };
} else if (MediaRecorder.isTypeSupported('video/mp4')) {
options = { type: 'video/mp4' };
} else {
console.error('no suitable mimetype found for this device');
}
const blob = new Blob(chunks, options);
const duration = Math.round((Date.now() - startTimestamp.current) / 1000);
formData.append('file', blob, 'recording.webm');
formData.append('file', blob, 'recording.mp4');
formData.append(
'data',
JSON.stringify({
@@ -112,7 +119,13 @@ export const useSpeech = (props?: OutLinkChatAuthProps) => {
};
mediaRecorder.current.start();
} catch (error) {}
} catch (error) {
toast({
status: 'warning',
title: getErrText(error, 'Whisper error')
});
console.log(error);
}
};
const stopSpeak = () => {