V4.9.4 feature (#4470)

* Training status (#4424)

* dataset data training state (#4311)

* dataset data training state

* fix

* fix ts

* fix

* fix api format

* fix

* fix

* perf: count training

* format

* fix: dataset training state (#4417)

* fix

* add test

* fix

* fix

* fix test

* fix test

* perf: training count

* count

* loading status

---------

Co-authored-by: heheer <heheer@sealos.io>

* doc

* website sync feature (#4429)

* perf: introduce BullMQ for website sync (#4403)

* perf: introduce BullMQ for website sync

* feat: new redis module

* fix: remove graceful shutdown

* perf: improve UI in dataset detail

- Updated the "change" icon SVG file.
- Modified i18n strings.
- Added new i18n string "immediate_sync".
- Improved UI in dataset detail page, including button icons and
background colors.

* refactor: Add chunkSettings to DatasetSchema

* perf: website sync ux

* env template

* fix: clean up website dataset when updating chunk settings (#4420)

* perf: check setting updated

* perf: worker concurrency

* feat: init script for website sync refactor (#4425)

* website feature doc

---------

Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>

* pro migration (#4388) (#4433)

* pro migration

* reuse customPdfParseType

Co-authored-by: gggaaallleee <91131304+gggaaallleee@users.noreply.github.com>

* perf: remove loading ui

* feat: config chat file expired time

* Redis cache (#4436)

* perf: add Redis cache for vector counting (#4432)

* feat: cache

* perf: get cache key

---------

Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>

* perf: mobile voice input (#4437)

* update:Mobile voice interaction (#4362)

* Add files via upload

* Add files via upload

* Update ollama.md

* Update ollama.md

* Add files via upload

* Update useSpeech.ts

* Update ChatInput.tsx

* Update useSpeech.ts

* Update ChatInput.tsx

* Update useSpeech.ts

* Update constants.ts

* Add files via upload

* Update ChatInput.tsx

* Update useSpeech.ts

* Update useSpeech.ts

* Update useSpeech.ts

* Update ChatInput.tsx

* Add files via upload

* Update common.json

* Update VoiceInput.tsx

* Update ChatInput.tsx

* Update VoiceInput.tsx

* Update useSpeech.ts

* Update useSpeech.ts

* Update common.json

* Update common.json

* Update common.json

* Update VoiceInput.tsx

* Update VoiceInput.tsx

* Update ChatInput.tsx

* Update VoiceInput.tsx

* Update ChatInput.tsx

* Update VoiceInput.tsx

* Update ChatInput.tsx

* Update useSpeech.ts

* Update common.json

* Update chat.json

* Update common.json

* Update chat.json

* Update common.json

* Update chat.json

* Update VoiceInput.tsx

* Update ChatInput.tsx

* Update useSpeech.ts

* Update VoiceInput.tsx

* speech ui

* 优化语音输入组件,调整输入框显示逻辑,修复语音输入遮罩层样式,更新画布背景透明度,增强用户交互体验。 (#4435)

* perf: mobile voice input

---------

Co-authored-by: dreamer6680 <1468683855@qq.com>

* Test completion v2 (#4438)

* add v2 completions (#4364)

* add v2 completions

* completion config

* config version

* fix

* frontend

* doc

* fix

* fix: completions v2 api

---------

Co-authored-by: heheer <heheer@sealos.io>

* package

* Test mongo log (#4443)

* feat: mongodb-log (#4426)

* perf: mongo log

* feat: completions stop reasoner

* mongo db log

---------

Co-authored-by: Finley Ge <32237950+FinleyGe@users.noreply.github.com>

* update doc

* Update doc

* fix external var ui (#4444)

* action

* fix: ts (#4458)

* preview doc action

add docs preview permission

update preview action

update action

* update doc (#4460)

* update preview action

* update doc

* remove

* update

* schema

* update mq export;perf: redis cache  (#4465)

* perf: redis cache

* update mq export

* perf: website sync error tip

* add error worker

* website sync ui (#4466)

* Updated the dynamic display of the voice input pop-up (#4469)

* Update VoiceInput.tsx

* Update VoiceInput.tsx

* Update VoiceInput.tsx

* fix: voice input

---------

Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
Co-authored-by: gggaaallleee <91131304+gggaaallleee@users.noreply.github.com>
Co-authored-by: dreamer6680 <1468683855@qq.com>
Co-authored-by: Finley Ge <32237950+FinleyGe@users.noreply.github.com>
This commit is contained in:
Archer
2025-04-08 12:05:04 +08:00
committed by GitHub
parent 5839325f77
commit f642c9603b
151 changed files with 5434 additions and 1354 deletions

View File

@@ -99,7 +99,6 @@ const SettingLLMModel = ({
<AISettingModal
onClose={onCloseAIChatSetting}
onSuccess={(e) => {
console.log(e);
onChange(e);
onCloseAIChatSetting();
}}

View File

@@ -1,7 +1,6 @@
import { useSpeech } from '@/web/common/hooks/useSpeech';
import { useSystemStore } from '@/web/common/system/useSystemStore';
import { Box, Flex, Spinner, Textarea } from '@chakra-ui/react';
import React, { useRef, useEffect, useCallback, useMemo } from 'react';
import React, { useRef, useEffect, useCallback, useMemo, useState } from 'react';
import { useTranslation } from 'next-i18next';
import MyTooltip from '@fastgpt/web/components/common/MyTooltip';
import MyIcon from '@fastgpt/web/components/common/Icon';
@@ -18,6 +17,7 @@ import FilePreview from '../../components/FilePreview';
import { useFileUpload } from '../hooks/useFileUpload';
import ComplianceTip from '@/components/common/ComplianceTip/index';
import { useToast } from '@fastgpt/web/hooks/useToast';
import VoiceInput, { type VoiceInputComponentRef } from './VoiceInput';
const InputGuideBox = dynamic(() => import('./InputGuideBox'));
@@ -44,6 +44,7 @@ const ChatInput = ({
const { t } = useTranslation();
const { toast } = useToast();
const { isPc } = useSystem();
const VoiceInputRef = useRef<VoiceInputComponentRef>(null);
const { setValue, watch, control } = chatForm;
const inputValue = watch('input');
@@ -53,7 +54,6 @@ const ChatInput = ({
const chatId = useContextSelector(ChatBoxContext, (v) => v.chatId);
const isChatting = useContextSelector(ChatBoxContext, (v) => v.isChatting);
const whisperConfig = useContextSelector(ChatBoxContext, (v) => v.whisperConfig);
const autoTTSResponse = useContextSelector(ChatBoxContext, (v) => v.autoTTSResponse);
const chatInputGuide = useContextSelector(ChatBoxContext, (v) => v.chatInputGuide);
const fileSelectConfig = useContextSelector(ChatBoxContext, (v) => v.fileSelectConfig);
@@ -106,86 +106,6 @@ const ChatInput = ({
[TextareaDom, canSendMessage, fileList, onSendMessage, replaceFiles]
);
/* whisper init */
const canvasRef = useRef<HTMLCanvasElement>(null);
const {
isSpeaking,
isTransCription,
stopSpeak,
startSpeak,
speakingTimeString,
renderAudioGraph,
stream
} = useSpeech({ appId, ...outLinkAuthData });
const onWhisperRecord = useCallback(() => {
const finishWhisperTranscription = (text: string) => {
if (!text) return;
if (whisperConfig?.autoSend) {
onSendMessage({
text,
files: fileList,
autoTTSResponse
});
replaceFiles([]);
} else {
resetInputVal({ text });
}
};
if (isSpeaking) {
return stopSpeak();
}
startSpeak(finishWhisperTranscription);
}, [
autoTTSResponse,
fileList,
isSpeaking,
onSendMessage,
replaceFiles,
resetInputVal,
startSpeak,
stopSpeak,
whisperConfig?.autoSend
]);
useEffect(() => {
if (!stream) {
return;
}
const audioContext = new AudioContext();
const analyser = audioContext.createAnalyser();
analyser.fftSize = 4096;
analyser.smoothingTimeConstant = 1;
const source = audioContext.createMediaStreamSource(stream);
source.connect(analyser);
const renderCurve = () => {
if (!canvasRef.current) return;
renderAudioGraph(analyser, canvasRef.current);
window.requestAnimationFrame(renderCurve);
};
renderCurve();
}, [renderAudioGraph, stream]);
const RenderTranslateLoading = useMemo(
() => (
<Flex
position={'absolute'}
top={0}
bottom={0}
left={0}
right={0}
zIndex={10}
pl={5}
alignItems={'center'}
bg={'white'}
color={'primary.500'}
visibility={isSpeaking && isTransCription ? 'visible' : 'hidden'}
>
<Spinner size={'sm'} mr={4} />
{t('common:core.chat.Converting to text')}
</Flex>
),
[isSpeaking, isTransCription, t]
);
const RenderTextarea = useMemo(
() => (
<Flex alignItems={'flex-end'} mt={fileList.length > 0 ? 1 : 0} pl={[2, 4]}>
@@ -198,7 +118,6 @@ const ChatInput = ({
cursor={'pointer'}
transform={'translateY(1px)'}
onClick={() => {
if (isSpeaking) return;
onOpenSelectFile();
}}
>
@@ -208,7 +127,6 @@ const ChatInput = ({
<File onSelect={(files) => onSelectFile({ files })} />
</Flex>
)}
{/* input area */}
<Textarea
ref={TextareaDom}
@@ -220,11 +138,7 @@ const ChatInput = ({
border: 'none'
}}
placeholder={
isSpeaking
? t('common:core.chat.Speaking')
: isPc
? t('common:core.chat.Type a message')
: t('chat:input_placeholder_phone')
isPc ? t('common:core.chat.Type a message') : t('chat:input_placeholder_phone')
}
resize={'none'}
rows={1}
@@ -237,9 +151,8 @@ const ChatInput = ({
wordBreak={'break-all'}
boxShadow={'none !important'}
color={'myGray.900'}
isDisabled={isSpeaking}
value={inputValue}
fontSize={['md', 'sm']}
value={inputValue}
onChange={(e) => {
const textarea = e.target;
textarea.style.height = textareaMinH;
@@ -290,118 +203,78 @@ const ChatInput = ({
}
}}
/>
<Flex alignItems={'center'} position={'absolute'} right={[2, 4]} bottom={['10px', '12px']}>
{/* voice-input */}
{whisperConfig?.open && !inputValue && !isChatting && (
<>
<canvas
ref={canvasRef}
style={{
height: '30px',
width: isSpeaking && !isTransCription ? '100px' : 0,
background: 'white',
zIndex: 0
<Flex
alignItems={'center'}
position={'absolute'}
right={[2, 4]}
bottom={['10px', '12px']}
zIndex={3}
>
{/* Voice input icon */}
{whisperConfig?.open && !inputValue && (
<MyTooltip label={t('common:core.chat.Record')}>
<Flex
alignItems={'center'}
justifyContent={'center'}
flexShrink={0}
h={['28px', '32px']}
w={['28px', '32px']}
mr={2}
borderRadius={'md'}
cursor={'pointer'}
_hover={{ bg: '#F5F5F8' }}
onClick={() => {
VoiceInputRef.current?.onSpeak?.();
}}
/>
{isSpeaking && (
<MyTooltip label={t('common:core.chat.Cancel Speak')}>
<Flex
mr={2}
alignItems={'center'}
justifyContent={'center'}
flexShrink={0}
h={['26px', '32px']}
w={['26px', '32px']}
borderRadius={'md'}
cursor={'pointer'}
_hover={{ bg: '#F5F5F8' }}
onClick={() => stopSpeak(true)}
>
<MyIcon
name={'core/chat/cancelSpeak'}
width={['20px', '22px']}
height={['20px', '22px']}
/>
</Flex>
</MyTooltip>
)}
<MyTooltip
label={
isSpeaking ? t('common:core.chat.Finish Speak') : t('common:core.chat.Record')
}
>
<Flex
mr={2}
alignItems={'center'}
justifyContent={'center'}
flexShrink={0}
h={['26px', '32px']}
w={['26px', '32px']}
borderRadius={'md'}
cursor={'pointer'}
_hover={{ bg: '#F5F5F8' }}
onClick={onWhisperRecord}
>
<MyIcon
name={isSpeaking ? 'core/chat/finishSpeak' : 'core/chat/recordFill'}
width={['20px', '22px']}
height={['20px', '22px']}
color={isSpeaking ? 'primary.500' : 'myGray.600'}
/>
</Flex>
</MyTooltip>
</>
)}
{/* send and stop icon */}
{isSpeaking ? (
<Box color={'#5A646E'} w={'36px'} textAlign={'right'} whiteSpace={'nowrap'}>
{speakingTimeString}
</Box>
) : (
<Flex
alignItems={'center'}
justifyContent={'center'}
flexShrink={0}
h={['28px', '32px']}
w={['28px', '32px']}
borderRadius={'md'}
bg={
isSpeaking || isChatting
? ''
: !havInput || hasFileUploading
? '#E5E5E5'
: 'primary.500'
}
cursor={havInput ? 'pointer' : 'not-allowed'}
lineHeight={1}
onClick={() => {
if (isChatting) {
return onStop();
}
return handleSend();
}}
>
{isChatting ? (
<MyIcon
animation={'zoomStopIcon 0.4s infinite alternate'}
name={'core/chat/recordFill'}
width={['22px', '25px']}
height={['22px', '25px']}
cursor={'pointer'}
name={'stop'}
color={'gray.500'}
color={'myGray.600'}
/>
) : (
<MyTooltip label={t('common:core.chat.Send Message')}>
<MyIcon
name={'core/chat/sendFill'}
width={['18px', '20px']}
height={['18px', '20px']}
color={'white'}
/>
</MyTooltip>
)}
</Flex>
</Flex>
</MyTooltip>
)}
{/* send and stop icon */}
<Flex
alignItems={'center'}
justifyContent={'center'}
flexShrink={0}
h={['28px', '32px']}
w={['28px', '32px']}
borderRadius={'md'}
bg={isChatting ? '' : !havInput || hasFileUploading ? '#E5E5E5' : 'primary.500'}
cursor={havInput ? 'pointer' : 'not-allowed'}
lineHeight={1}
onClick={() => {
if (isChatting) {
return onStop();
}
return handleSend();
}}
>
{isChatting ? (
<MyIcon
animation={'zoomStopIcon 0.4s infinite alternate'}
width={['22px', '25px']}
height={['22px', '25px']}
cursor={'pointer'}
name={'stop'}
color={'gray.500'}
/>
) : (
<MyTooltip label={t('common:core.chat.Send Message')}>
<MyIcon
name={'core/chat/sendFill'}
width={['18px', '20px']}
height={['18px', '20px']}
color={'white'}
/>
</MyTooltip>
)}
</Flex>
</Flex>
</Flex>
),
@@ -415,21 +288,15 @@ const ChatInput = ({
inputValue,
isChatting,
isPc,
isSpeaking,
isTransCription,
onOpenSelectFile,
onSelectFile,
onStop,
onWhisperRecord,
selectFileIcon,
selectFileLabel,
setValue,
showSelectFile,
showSelectImg,
speakingTimeString,
stopSpeak,
t,
whisperConfig?.open
t
]
);
@@ -468,7 +335,7 @@ const ChatInput = ({
pt={fileList.length > 0 ? '0' : ['14px', '18px']}
pb={['14px', '18px']}
position={'relative'}
boxShadow={isSpeaking ? `0 0 10px rgba(54,111,255,0.4)` : `0 0 10px rgba(0,0,0,0.2)`}
boxShadow={`0 0 10px rgba(0,0,0,0.2)`}
borderRadius={['none', 'md']}
bg={'white'}
overflow={'display'}
@@ -495,15 +362,20 @@ const ChatInput = ({
}}
/>
)}
{/* translate loading */}
{RenderTranslateLoading}
{/* file preview */}
<Box px={[1, 3]}>
<FilePreview fileList={fileList} removeFiles={removeFiles} />
</Box>
{/* voice input and loading container */}
{!inputValue && (
<VoiceInput
ref={VoiceInputRef}
onSendMessage={onSendMessage}
resetInputVal={resetInputVal}
/>
)}
{RenderTextarea}
</Box>
<ComplianceTip type={'chat'} />

View File

@@ -0,0 +1,369 @@
import { useSpeech } from '@/web/common/hooks/useSpeech';
import { Box, Flex, HStack, Spinner } from '@chakra-ui/react';
import React, {
useRef,
useEffect,
useCallback,
useState,
forwardRef,
useImperativeHandle,
useMemo
} from 'react';
import { useTranslation } from 'next-i18next';
import MyTooltip from '@fastgpt/web/components/common/MyTooltip';
import MyIcon from '@fastgpt/web/components/common/Icon';
import { useSystem } from '@fastgpt/web/hooks/useSystem';
import { useContextSelector } from 'use-context-selector';
import { ChatBoxContext } from '../Provider';
import MyIconButton from '@/pageComponents/account/team/OrgManage/IconButton';
/** Imperative API exposed by <VoiceInput> through its forwarded ref. */
export interface VoiceInputComponentRef {
  /** Start voice input: begins recording immediately on PC, shows the press-to-speak panel on mobile. */
  onSpeak: () => void;
}

type VoiceInputProps = {
  // Sends the transcribed text as a chat message. `files` / `autoTTSResponse`
  // are forwarded as-is — presumably matches the parent ChatInput's
  // onSendMessage contract; TODO confirm against the caller.
  onSendMessage: (params: { text: string; files?: any[]; autoTTSResponse?: boolean }) => void;
  // Puts the transcribed text back into the input box instead of auto-sending it.
  resetInputVal: (val: { text: string }) => void;
};
// PC voice input
/**
 * Desktop voice-input bar: "Speaking" label, waveform canvas, elapsed
 * time, and cancel / finish controls shown while recording.
 */
const PCVoiceInput = ({
  speakingTimeString,
  stopSpeak,
  canvasRef
}: {
  speakingTimeString: string;
  stopSpeak: (param: boolean) => void;
  canvasRef: React.RefObject<HTMLCanvasElement>;
}) => {
  const { t } = useTranslation();

  // The two round controls only differ in their icon, tooltip text and
  // the cancel flag forwarded to stopSpeak.
  const controls = [
    { icon: 'core/chat/cancelSpeak', tip: t('common:core.chat.Cancel Speak'), cancel: true },
    { icon: 'core/chat/finishSpeak', tip: t('common:core.chat.Finish Speak'), cancel: false }
  ] as const;

  return (
    <HStack h={'100%'} px={4}>
      <Box fontSize="sm" color="myGray.500" flex={'1 0 0'}>
        {t('common:core.chat.Speaking')}
      </Box>
      {/* Live waveform drawn by the parent's render loop */}
      <canvas
        ref={canvasRef}
        style={{
          height: '10px',
          width: '100px',
          background: 'white'
        }}
      />
      <Box fontSize="sm" color="myGray.500" whiteSpace={'nowrap'}>
        {speakingTimeString}
      </Box>
      {controls.map(({ icon, tip, cancel }) => (
        <MyTooltip key={icon} label={tip}>
          <MyIconButton name={icon} h={'22px'} w={'22px'} onClick={() => stopSpeak(cancel)} />
        </MyTooltip>
      ))}
    </HStack>
  );
};
// mobile voice input
/**
 * Mobile voice input: press-and-hold to record, swipe up to cancel,
 * release to send. Draws the waveform into `canvasRef` and shows a
 * fixed-position gradient hint mask above the input bar while recording.
 */
const MobileVoiceInput = ({
  isSpeaking,
  onStartSpeak,
  onCloseSpeak,
  stopSpeak,
  canvasRef
}: {
  isSpeaking: boolean;
  onStartSpeak: () => void;
  onCloseSpeak: () => void;
  stopSpeak: (param: boolean) => void;
  canvasRef: React.RefObject<HTMLCanvasElement>;
}) => {
  const { t } = useTranslation();

  // Gesture bookkeeping (refs: no re-render needed on change).
  const isPressing = useRef(false); // a press gesture is currently in progress
  const startTimeRef = useRef(0); // press start time; debounces accidental taps
  const startYRef = useRef(0); // initial touch Y; used to detect a cancel swipe
  const [isCancel, setIsCancel] = useState(false);

  // Swiping up more than this many px marks the recording as cancelled.
  const CANCEL_SWIPE_PX = 90;
  // Presses shorter than this are treated as accidental taps and cancelled.
  const MIN_PRESS_MS = 200;

  // Anchor the hint mask just above the recording bar; fall back to 50px
  // before the canvas has been laid out.
  const canvasPosition = canvasRef.current?.getBoundingClientRect();
  const maskBottom = canvasPosition ? `${window.innerHeight - canvasPosition.top}px` : '50px';

  const handleTouchStart = useCallback(
    (e: React.TouchEvent<HTMLDivElement>) => {
      isPressing.current = true;
      setIsCancel(false);
      startTimeRef.current = Date.now();
      startYRef.current = e.touches[0].pageY;
      onStartSpeak();
    },
    [onStartSpeak]
  );

  const handleTouchMove = useCallback((e: React.TouchEvent<HTMLDivElement>) => {
    // Ignore moves that do not belong to an active press gesture.
    if (!isPressing.current) return;
    const deltaY = startYRef.current - e.touches[0].pageY;
    setIsCancel(deltaY > CANCEL_SWIPE_PX);
  }, []);

  const handleTouchEnd = useCallback(() => {
    if (!isPressing.current) return;
    // Reset so a stray touchend cannot trigger stopSpeak a second time.
    isPressing.current = false;
    const pressDuration = Date.now() - startTimeRef.current;
    stopSpeak(isCancel || pressDuration < MIN_PRESS_MS);
  }, [isCancel, stopSpeak]);

  return (
    <Flex position="relative" h="100%">
      {/* Back to keyboard input (hidden while recording) */}
      {!isSpeaking && (
        <MyTooltip label={t('chat:back_to_text')}>
          <MyIconButton
            position="absolute"
            right={2}
            top={'50%'}
            transform={'translateY(-50%)'}
            zIndex={5}
            name={'core/chat/backText'}
            h={'22px'}
            w={'22px'}
            onClick={onCloseSpeak}
          />
        </MyTooltip>
      )}

      {/* Press-and-hold area; turns red while a cancel swipe is active */}
      <Flex
        alignItems={'center'}
        justifyContent={'center'}
        h="100%"
        flex="1 0 0"
        bg={isSpeaking ? (isCancel ? 'red.500' : 'primary.500') : 'white'}
        onTouchStart={handleTouchStart}
        onTouchMove={handleTouchMove}
        onTouchEnd={handleTouchEnd}
        onTouchCancel={() => {
          isPressing.current = false;
          stopSpeak(true);
        }}
        zIndex={4}
      >
        <Box visibility={isSpeaking ? 'hidden' : 'visible'}>{t('chat:press_to_speak')}</Box>
        {/* Waveform canvas; only visible while recording */}
        <Box
          position="absolute"
          h={'100%'}
          w={'100%'}
          as="canvas"
          ref={canvasRef}
          flex="0 0 80%"
          visibility={isSpeaking ? 'visible' : 'hidden'}
        />
      </Flex>

      {/* Gradient mask above the bar with the release hint */}
      {isSpeaking && (
        <Flex
          justifyContent="center"
          alignItems="center"
          height="100%"
          position="fixed"
          left={0}
          right={0}
          bottom={maskBottom}
          h={'200px'}
          bg="linear-gradient(to top, white, rgba(255, 255, 255, 0.7), rgba(255, 255, 255, 0))"
        >
          <Box fontSize="sm" color="myGray.500" position="absolute" bottom={'10px'}>
            {isCancel ? t('chat:release_cancel') : t('chat:release_send')}
          </Box>
        </Flex>
      )}
    </Flex>
  );
};
/**
 * Voice-input container shared by PC and mobile.
 *
 * Exposes `onSpeak()` through the forwarded ref so the parent chat input
 * can trigger recording. Renders nothing unless whisper is enabled and a
 * recording (or the mobile pre-speak panel) is active. Transcribed text is
 * either auto-sent (`whisperConfig.autoSend`) or placed back into the input.
 */
const VoiceInput = forwardRef<VoiceInputComponentRef, VoiceInputProps>(
  ({ onSendMessage, resetInputVal }, ref) => {
    const { t } = useTranslation();
    const { isPc } = useSystem();

    const outLinkAuthData = useContextSelector(ChatBoxContext, (v) => v.outLinkAuthData);
    const appId = useContextSelector(ChatBoxContext, (v) => v.appId);
    const whisperConfig = useContextSelector(ChatBoxContext, (v) => v.whisperConfig);
    const autoTTSResponse = useContextSelector(ChatBoxContext, (v) => v.autoTTSResponse);

    const canvasRef = useRef<HTMLCanvasElement>(null);
    const {
      isSpeaking,
      isTransCription,
      stopSpeak,
      startSpeak,
      speakingTimeString,
      renderAudioGraphPc,
      renderAudioGraphMobile,
      stream
    } = useSpeech({ appId, ...outLinkAuthData });
    // Mobile-only: the press-to-speak panel is shown before recording starts.
    const [mobilePreSpeak, setMobilePreSpeak] = useState(false);

    // Canvas render: wire the mic stream to an analyser and draw the
    // waveform each animation frame until the stream goes inactive.
    useEffect(() => {
      if (!stream) {
        return;
      }
      const audioContext = new AudioContext();
      const analyser = audioContext.createAnalyser();
      analyser.fftSize = 4096;
      analyser.smoothingTimeConstant = 1;
      const source = audioContext.createMediaStreamSource(stream);
      source.connect(analyser);

      let animationFrameId: number | null = null;
      const renderCurve = () => {
        const canvas = canvasRef.current;
        if (!canvas) return;

        const ctx = canvas.getContext('2d');
        if (!ctx) return;

        // Recording stopped: clear the canvas and stop the rAF loop.
        if (!stream.active) {
          ctx.clearRect(0, 0, canvas.width, canvas.height);
          if (animationFrameId) {
            window.cancelAnimationFrame(animationFrameId);
            animationFrameId = null;
          }
          return;
        }

        // PC and mobile draw different waveform styles.
        if (isPc) {
          renderAudioGraphPc(analyser, canvas);
        } else {
          renderAudioGraphMobile(analyser, canvas);
        }
        animationFrameId = window.requestAnimationFrame(renderCurve);
      };
      renderCurve();

      // Tear down audio graph and any pending frame on stream change/unmount.
      return () => {
        if (animationFrameId) {
          window.cancelAnimationFrame(animationFrameId);
        }
        audioContext.close();
        source.disconnect();
        analyser.disconnect();
      };
    }, [stream, canvasRef, renderAudioGraphPc, renderAudioGraphMobile, isPc]);

    // Begin recording; on transcription completion either auto-send the
    // text or put it back into the input box.
    const onStartSpeak = useCallback(() => {
      const finishWhisperTranscription = (text: string) => {
        if (!text) return;
        if (whisperConfig?.autoSend) {
          onSendMessage({
            text,
            autoTTSResponse
          });
        } else {
          resetInputVal({ text });
        }
      };
      startSpeak(finishWhisperTranscription);
    }, [autoTTSResponse, onSendMessage, resetInputVal, startSpeak, whisperConfig?.autoSend]);

    // NOTE(review): "onSpeach" is a typo for "onSpeak" — internal name only;
    // rename when convenient.
    // PC records immediately; mobile first shows the press-to-speak panel.
    const onSpeach = useCallback(() => {
      if (isPc) {
        onStartSpeak();
      } else {
        setMobilePreSpeak(true);
      }
    }, [isPc, onStartSpeak]);
    useImperativeHandle(ref, () => ({
      onSpeak: onSpeach
    }));

    // Render nothing when whisper is disabled or there is no active state.
    if (!whisperConfig?.open) return null;
    if (!mobilePreSpeak && !isSpeaking && !isTransCription) return null;

    return (
      <Box
        position="absolute"
        overflow={'hidden'}
        userSelect={'none'}
        top={0}
        left={0}
        right={0}
        bottom={0}
        bg="white"
        zIndex={5}
        borderRadius={isPc ? 'md' : ''}
        onContextMenu={(e) => e.preventDefault()}
      >
        {isPc ? (
          <PCVoiceInput
            speakingTimeString={speakingTimeString}
            stopSpeak={stopSpeak}
            canvasRef={canvasRef}
          />
        ) : (
          <MobileVoiceInput
            isSpeaking={isSpeaking}
            onStartSpeak={onStartSpeak}
            onCloseSpeak={() => setMobilePreSpeak(false)}
            stopSpeak={stopSpeak}
            canvasRef={canvasRef}
          />
        )}

        {/* Transcription-in-progress overlay */}
        {isTransCription && (
          <Flex
            position={'absolute'}
            top={0}
            bottom={0}
            left={0}
            right={0}
            pl={5}
            alignItems={'center'}
            bg={'white'}
            color={'primary.500'}
            zIndex={6}
          >
            <Spinner size={'sm'} mr={4} />
            {t('common:core.chat.Converting to text')}
          </Flex>
        )}
      </Box>
    );
  }
);

VoiceInput.displayName = 'VoiceInput';

export default VoiceInput;

View File

@@ -219,7 +219,8 @@ const ChatBox = ({
tool,
interactive,
autoTTSResponse,
variables
variables,
nodeResponse
}: generatingMessageProps & { autoTTSResponse?: boolean }) => {
setChatRecords((state) =>
state.map((item, index) => {
@@ -232,7 +233,14 @@ const ChatBox = ({
JSON.stringify(item.value[item.value.length - 1])
);
if (event === SseResponseEventEnum.flowNodeStatus && status) {
if (event === SseResponseEventEnum.flowNodeResponse && nodeResponse) {
return {
...item,
responseData: item.responseData
? [...item.responseData, nodeResponse]
: [nodeResponse]
};
} else if (event === SseResponseEventEnum.flowNodeStatus && status) {
return {
...item,
status,
@@ -518,36 +526,34 @@ const ChatBox = ({
reserveTool: true
});
const {
responseData,
responseText,
isNewChat = false
} = await onStartChat({
const { responseText } = await onStartChat({
messages, // 保证最后一条是 Human 的消息
responseChatItemId: responseChatId,
controller: abortSignal,
generatingMessage: (e) => generatingMessage({ ...e, autoTTSResponse }),
variables: requestVariables
});
if (responseData?.[responseData.length - 1]?.error) {
toast({
title: t(responseData[responseData.length - 1].error?.message),
status: 'error'
});
}
// Set last chat finish status
let newChatHistories: ChatSiteItemType[] = [];
setChatRecords((state) => {
newChatHistories = state.map((item, index) => {
if (index !== state.length - 1) return item;
// Check node response error
const responseData = mergeChatResponseData(item.responseData || []);
if (responseData[responseData.length - 1]?.error) {
toast({
title: t(responseData[responseData.length - 1].error?.message),
status: 'error'
});
}
return {
...item,
status: ChatStatusEnum.finish,
time: new Date(),
responseData: item.responseData
? mergeChatResponseData([...item.responseData, ...responseData])
: responseData
responseData
};
});
return newChatHistories;
@@ -567,7 +573,7 @@ const ChatBox = ({
} catch (err: any) {
console.log(err);
toast({
title: t(getErrText(err, 'core.chat.error.Chat error') as any),
title: t(getErrText(err, t('common:core.chat.error.Chat error') as any)),
status: 'error',
duration: 5000,
isClosable: true
@@ -807,12 +813,14 @@ const ChatBox = ({
showEmptyIntro &&
chatRecords.length === 0 &&
!variableList?.length &&
!externalVariableList?.length &&
!welcomeText,
[
chatRecords.length,
feConfigs?.show_emptyChat,
showEmptyIntro,
variableList?.length,
externalVariableList?.length,
welcomeText
]
);

View File

@@ -18,6 +18,7 @@ import { ChatItemContext } from '@/web/core/chat/context/chatItemContext';
import { ChatRecordContext } from '@/web/core/chat/context/chatRecordContext';
import { AppFileSelectConfigType } from '@fastgpt/global/core/app/type';
import { defaultAppSelectFileConfig } from '@fastgpt/global/core/app/constants';
import { mergeChatResponseData } from '@fastgpt/global/core/chat/utils';
type PluginRunContextType = PluginRunBoxProps & {
isChatting: boolean;
@@ -46,11 +47,12 @@ const PluginRunContextProvider = ({
const pluginInputs = useContextSelector(ChatItemContext, (v) => v.chatBoxData?.app?.pluginInputs);
const setTab = useContextSelector(ChatItemContext, (v) => v.setPluginRunTab);
const variablesForm = useContextSelector(ChatItemContext, (v) => v.variablesForm);
const chatConfig = useContextSelector(ChatItemContext, (v) => v.chatBoxData?.app?.chatConfig);
const setChatRecords = useContextSelector(ChatRecordContext, (v) => v.setChatRecords);
const chatRecords = useContextSelector(ChatRecordContext, (v) => v.chatRecords);
const chatConfig = useContextSelector(ChatItemContext, (v) => v.chatBoxData?.app?.chatConfig);
const { instruction = '', fileSelectConfig = defaultAppSelectFileConfig } = useMemo(
() => chatConfig || {},
[chatConfig]
@@ -65,7 +67,7 @@ const PluginRunContextProvider = ({
}, []);
const generatingMessage = useCallback(
({ event, text = '', status, name, tool }: generatingMessageProps) => {
({ event, text = '', status, name, tool, nodeResponse, variables }: generatingMessageProps) => {
setChatRecords((state) =>
state.map((item, index) => {
if (index !== state.length - 1 || item.obj !== ChatRoleEnum.AI) return item;
@@ -74,7 +76,14 @@ const PluginRunContextProvider = ({
JSON.stringify(item.value[item.value.length - 1])
);
if (event === SseResponseEventEnum.flowNodeStatus && status) {
if (event === SseResponseEventEnum.flowNodeResponse && nodeResponse) {
return {
...item,
responseData: item.responseData
? [...item.responseData, nodeResponse]
: [nodeResponse]
};
} else if (event === SseResponseEventEnum.flowNodeStatus && status) {
return {
...item,
status,
@@ -144,13 +153,15 @@ const PluginRunContextProvider = ({
return val;
})
};
} else if (event === SseResponseEventEnum.updateVariables && variables) {
variablesForm.setValue('variables', variables);
}
return item;
})
);
},
[setChatRecords]
[setChatRecords, variablesForm]
);
const isChatting = useMemo(
@@ -226,7 +237,7 @@ const PluginRunContextProvider = ({
}
}
const { responseData } = await onStartChat({
await onStartChat({
messages,
controller: chatController.current,
generatingMessage,
@@ -235,16 +246,20 @@ const PluginRunContextProvider = ({
...formatVariables
}
});
if (responseData?.[responseData.length - 1]?.error) {
toast({
title: responseData[responseData.length - 1].error?.message,
status: 'error'
});
}
setChatRecords((state) =>
state.map((item, index) => {
if (index !== state.length - 1) return item;
// Check node response error
const responseData = mergeChatResponseData(item.responseData || []);
if (responseData[responseData.length - 1]?.error) {
toast({
title: t(responseData[responseData.length - 1].error?.message),
status: 'error'
});
}
return {
...item,
status: 'finish',

View File

@@ -1,6 +1,10 @@
import { StreamResponseType } from '@/web/common/api/fetch';
import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { ChatSiteItemType, ToolModuleResponseItemType } from '@fastgpt/global/core/chat/type';
import {
ChatHistoryItemResType,
ChatSiteItemType,
ToolModuleResponseItemType
} from '@fastgpt/global/core/chat/type';
import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
export type generatingMessageProps = {
@@ -12,6 +16,7 @@ export type generatingMessageProps = {
tool?: ToolModuleResponseItemType;
interactive?: WorkflowInteractiveResponseType;
variables?: Record<string, any>;
nodeResponse?: ChatHistoryItemResType;
};
export type StartChatFnProps = {

View File

@@ -17,6 +17,7 @@ import { ChatBoxContext } from '../ChatContainer/ChatBox/Provider';
import { useRequest2 } from '@fastgpt/web/hooks/useRequest';
import { getFileIcon } from '@fastgpt/global/common/file/icon';
import EmptyTip from '@fastgpt/web/components/common/EmptyTip';
import { completionFinishReasonMap } from '@fastgpt/global/core/ai/constants';
type sideTabItemType = {
moduleLogo?: string;
@@ -196,6 +197,13 @@ export const WholeResponseContent = ({
label={t('common:core.chat.response.module maxToken')}
value={activeModule?.maxToken}
/>
{activeModule?.finishReason && (
<Row
label={t('chat:completion_finish_reason')}
value={t(completionFinishReasonMap[activeModule?.finishReason])}
/>
)}
<Row label={t('chat:reasoning_text')} value={activeModule?.reasoningText} />
<Row
label={t('common:core.chat.response.module historyPreview')}