V4.8.20 feature (#3686)

* Aiproxy (#3649)

* model config

* feat: model config ui

* perf: rename variable

* feat: custom request url

* perf: model buffer

* perf: init model

* feat: json model config

* auto login

* fix: ts

* update packages

* package

* fix: dockerfile

* feat: usage filter & export & dashboard (#3538)

* feat: usage filter & export & dashboard

* adjust ui

* fix tmb scroll

* fix code & select all

* merge

* perf: usages list;perf: move components (#3654)

* perf: usages list

* team sub plan load

* perf: usage dashboard code

* perf: dashboard ui

* perf: move components

* add default model config (#3653)

* 4.8.20 test (#3656)

* provider

* perf: model config

* model perf (#3657)

* fix: model

* dataset quote

* perf: model config

* model tag

* doubao model config

* perf: config model

* feat: model test

* fix: POST 500 error on dingtalk bot (#3655)

* feat: default model (#3662)

* move model config

* feat: default model

* fix: falsely triggered org selection (#3661)

* export usage csv i18n (#3660)

* export usage csv i18n

* fix build

* feat: markdown extension (#3663)

* feat: markdown extension

* media cors

* rerank test

* default price

* perf: default model

* fix: cannot custom provider

* fix: default model select

* update bg

* perf: default model selector

* fix: usage export

* i18n

* fix: rerank

* update init extension

* perf: ip limit check

* doubao model order

* web default model

* perf: tts selector

* perf: tts error

* qrcode package

* reload buffer (#3665)

* reload buffer

* reload buffer

* tts selector

* fix: err tip (#3666)

* fix: err tip

* perf: training queue

* doc

* fix interactive edge (#3659)

* fix interactive edge

* fix

* comment

* add gemini model

* fix: chat model select

* perf: supplement assistant empty response (#3669)

* perf: supplement assistant empty response

* check array

* perf: max_token count;feat: support reasoner output;fix: member scroll (#3681)

* perf: supplement assistant empty response

* check array

* perf: max_token count

* feat: support reasoner output

* member scroll

* update provider order

* i18n

* fix: stream response (#3682)

* perf: supplement assistant empty response

* check array

* fix: stream response

* fix: model config cannot set to null

* fix: reasoning response (#3684)

* perf: supplement assistant empty response

* check array

* fix: reasoning response

* fix: reasoning response

* doc (#3685)

* perf: supplement assistant empty response

* check array

* doc

* lock

* animation

* update doc

* update compose

* doc

* doc

---------

Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
This commit is contained in:
Archer
2025-02-05 00:10:47 +08:00
committed by GitHub
parent c393002f1d
commit db2c0a0bdb
496 changed files with 9031 additions and 4726 deletions

View File

@@ -72,6 +72,7 @@ const AIChatSettingsModal = ({
defaultValues: defaultData
});
const model = watch('model');
const reasoning = watch(NodeInputKeyEnum.aiChatReasoning);
const showResponseAnswerText = watch(NodeInputKeyEnum.aiChatIsResponseText) !== undefined;
const showVisionSwitch = watch(NodeInputKeyEnum.aiChatVision) !== undefined;
const showMaxHistoriesSlider = watch('maxHistories') !== undefined;
@@ -80,8 +81,12 @@ const AIChatSettingsModal = ({
const temperature = watch('temperature');
const useVision = watch('aiChatVision');
const selectedModel = getWebLLMModel(model);
const selectedModel = useMemo(() => {
return getWebLLMModel(model);
}, [model]);
const llmSupportVision = !!selectedModel?.vision;
const llmSupportTemperature = typeof selectedModel?.maxTemperature === 'number';
const llmSupportReasoning = !!selectedModel?.reasoning;
const tokenLimit = useMemo(() => {
return selectedModel?.maxResponse || 4096;
@@ -244,7 +249,7 @@ const AIChatSettingsModal = ({
</Box>
<Box flex={'1 0 0'}>
<InputSlider
min={100}
min={0}
max={tokenLimit}
step={200}
isDisabled={maxToken === undefined}
@@ -256,36 +261,51 @@ const AIChatSettingsModal = ({
/>
</Box>
</Flex>
<Flex {...FlexItemStyles}>
<Box {...LabelStyles}>
<Flex alignItems={'center'}>
{t('app:temperature')}
<QuestionTip label={t('app:temperature_tip')} />
</Flex>
<Switch
isChecked={temperature !== undefined}
size={'sm'}
onChange={(e) => {
setValue('temperature', e.target.checked ? 0 : undefined);
}}
/>
</Box>
<Box flex={'1 0 0'}>
<InputSlider
min={0}
max={10}
step={1}
value={temperature}
isDisabled={temperature === undefined}
onChange={(e) => {
setValue(NodeInputKeyEnum.aiChatTemperature, e);
setRefresh(!refresh);
}}
/>
</Box>
</Flex>
{llmSupportTemperature && (
<Flex {...FlexItemStyles}>
<Box {...LabelStyles}>
<Flex alignItems={'center'}>
{t('app:temperature')}
<QuestionTip label={t('app:temperature_tip')} />
</Flex>
<Switch
isChecked={temperature !== undefined}
size={'sm'}
onChange={(e) => {
setValue('temperature', e.target.checked ? 0 : undefined);
}}
/>
</Box>
<Box flex={'1 0 0'}>
<InputSlider
min={0}
max={10}
step={1}
value={temperature}
isDisabled={temperature === undefined}
onChange={(e) => {
setValue(NodeInputKeyEnum.aiChatTemperature, e);
setRefresh(!refresh);
}}
/>
</Box>
</Flex>
)}
{llmSupportReasoning && (
<Flex {...FlexItemStyles} h={'25px'}>
<Box {...LabelStyles}>
<Flex alignItems={'center'}>{t('app:reasoning_response')}</Flex>
<Switch
isChecked={reasoning || false}
size={'sm'}
onChange={(e) => {
const value = e.target.checked;
setValue(NodeInputKeyEnum.aiChatReasoning, value);
}}
/>
</Box>
</Flex>
)}
{showResponseAnswerText && (
<Flex {...FlexItemStyles} h={'25px'}>
<Box {...LabelStyles}>
@@ -307,12 +327,11 @@ const AIChatSettingsModal = ({
)}
{showVisionSwitch && (
<Flex {...FlexItemStyles} h={'25px'}>
<Box {...LabelStyles}>
<Box {...LabelStyles} w={llmSupportVision ? '9rem' : 'auto'}>
<Flex alignItems={'center'}>
{t('app:llm_use_vision')}
<QuestionTip ml={1} label={t('app:llm_use_vision_tip')}></QuestionTip>
</Flex>
{llmSupportVision ? (
<Switch
isChecked={useVision}
@@ -323,7 +342,7 @@ const AIChatSettingsModal = ({
}}
/>
) : (
<Box fontSize={'sm'} color={'myGray.500'}>
<Box ml={3} fontSize={'sm'} color={'myGray.500'}>
{t('app:llm_not_support_vision')}
</Box>
)}

View File

@@ -26,6 +26,7 @@ import { useSystemStore } from '@/web/common/system/useSystemStore';
import Avatar from '@fastgpt/web/components/common/Avatar';
import MyTag from '@fastgpt/web/components/common/Tag/index';
import dynamic from 'next/dynamic';
import CopyBox from '@fastgpt/web/components/common/String/CopyBox';
const MyModal = dynamic(() => import('@fastgpt/web/components/common/MyModal'));
@@ -53,7 +54,8 @@ const ModelTable = () => {
const [search, setSearch] = useState('');
const { llmModelList, audioSpeechModelList, vectorModelList, whisperModel } = useSystemStore();
const { llmModelList, ttsModelList, embeddingModelList, sttModelList, reRankModelList } =
useSystemStore();
const modelList = useMemo(() => {
const formatLLMModelList = llmModelList.map((item) => ({
@@ -80,64 +82,72 @@ const ModelTable = () => {
) : (
<Flex color={'myGray.700'}>
<Box fontWeight={'bold'} color={'myGray.900'} mr={0.5}>
{item.charsPointsPrice}
{item.charsPointsPrice || 0}
</Box>
{`${t('common:support.wallet.subscription.point')} / 1K Tokens`}
</Flex>
),
tagColor: 'blue'
}));
const formatVectorModelList = vectorModelList.map((item) => ({
const formatVectorModelList = embeddingModelList.map((item) => ({
...item,
typeLabel: t('common:model.type.embedding'),
priceLabel: (
<Flex color={'myGray.700'}>
<Box fontWeight={'bold'} color={'myGray.900'} mr={0.5}>
{item.charsPointsPrice}
{item.charsPointsPrice || 0}
</Box>
{` ${t('common:support.wallet.subscription.point')} / 1K Tokens`}
</Flex>
),
tagColor: 'yellow'
}));
const formatAudioSpeechModelList = audioSpeechModelList.map((item) => ({
const formatAudioSpeechModelList = ttsModelList.map((item) => ({
...item,
typeLabel: t('common:model.type.tts'),
priceLabel: (
<Flex color={'myGray.700'}>
<Box fontWeight={'bold'} color={'myGray.900'} mr={0.5}>
{item.charsPointsPrice}
{item.charsPointsPrice || 0}
</Box>
{` ${t('common:support.wallet.subscription.point')} / 1K ${t('common:unit.character')}`}
</Flex>
),
tagColor: 'green'
}));
const formatWhisperModel = {
...whisperModel,
const formatWhisperModelList = sttModelList.map((item) => ({
...item,
typeLabel: t('common:model.type.stt'),
priceLabel: (
<Flex color={'myGray.700'}>
<Box fontWeight={'bold'} color={'myGray.900'} mr={0.5}>
{whisperModel.charsPointsPrice}
{item.charsPointsPrice}
</Box>
{` ${t('common:support.wallet.subscription.point')} / 60${t('common:unit.seconds')}`}
</Flex>
),
tagColor: 'purple'
};
}));
const formatRerankModelList = reRankModelList.map((item) => ({
...item,
typeLabel: t('common:model.type.reRank'),
priceLabel: <Flex color={'myGray.700'}>- </Flex>,
tagColor: 'red'
}));
const list = (() => {
if (modelType === ModelTypeEnum.chat) return formatLLMModelList;
if (modelType === ModelTypeEnum.llm) return formatLLMModelList;
if (modelType === ModelTypeEnum.embedding) return formatVectorModelList;
if (modelType === ModelTypeEnum.tts) return formatAudioSpeechModelList;
if (modelType === ModelTypeEnum.stt) return [formatWhisperModel];
if (modelType === ModelTypeEnum.stt) return formatWhisperModelList;
if (modelType === ModelTypeEnum.rerank) return formatRerankModelList;
return [
...formatLLMModelList,
...formatVectorModelList,
...formatAudioSpeechModelList,
formatWhisperModel
...formatWhisperModelList,
...formatRerankModelList
];
})();
const formatList = list.map((item) => {
@@ -167,9 +177,10 @@ const ModelTable = () => {
return filterList;
}, [
llmModelList,
vectorModelList,
audioSpeechModelList,
whisperModel,
embeddingModelList,
ttsModelList,
sttModelList,
reRankModelList,
t,
modelType,
provider,
@@ -179,15 +190,16 @@ const ModelTable = () => {
const filterProviderList = useMemo(() => {
const allProviderIds: string[] = [
...llmModelList,
...vectorModelList,
...audioSpeechModelList,
whisperModel
...embeddingModelList,
...ttsModelList,
...sttModelList,
...reRankModelList
].map((model) => model.provider);
return providerList.current.filter(
(item) => allProviderIds.includes(item.value) || item.value === ''
);
}, [audioSpeechModelList, llmModelList, vectorModelList, whisperModel]);
}, [ttsModelList, llmModelList, embeddingModelList, sttModelList, reRankModelList]);
return (
<Flex flexDirection={'column'} h={'100%'}>
@@ -241,7 +253,9 @@ const ModelTable = () => {
<Td fontSize={'sm'}>
<HStack>
<Avatar src={item.avatar} w={'1.2rem'} />
<Box color={'myGray.900'}>{item.name}</Box>
<CopyBox value={item.name} color={'myGray.900'}>
{item.name}
</CopyBox>
</HStack>
</Td>
<Td>

View File

@@ -1,4 +1,4 @@
import React, { useMemo } from 'react';
import React, { useEffect, useMemo } from 'react';
import { useSystemStore } from '@/web/common/system/useSystemStore';
import { LLMModelTypeEnum, llmModelTypeFilterMap } from '@fastgpt/global/core/ai/constants';
import { Box, css, HStack, IconButton, useDisclosure } from '@chakra-ui/react';
@@ -7,8 +7,8 @@ import AISettingModal, { AIChatSettingsModalProps } from '@/components/core/ai/A
import MyTooltip from '@fastgpt/web/components/common/MyTooltip';
import { useTranslation } from 'next-i18next';
import MyIcon from '@fastgpt/web/components/common/Icon';
import { useMount } from 'ahooks';
import AIModelSelector from '@/components/Select/AIModelSelector';
import { getWebDefaultModel } from '@/web/common/system/utils';
type Props = {
llmModelType?: `${LLMModelTypeEnum}`;
@@ -39,15 +39,19 @@ const SettingLLMModel = ({
}),
[llmModelList, llmModelType]
);
const defaultModel = useMemo(() => {
return getWebDefaultModel(modelList).model;
}, [modelList]);
// Set default model
useMount(() => {
if (!model && modelList.length > 0) {
useEffect(() => {
if (!modelList.find((item) => item.model === model) && !!defaultModel) {
onChange({
...defaultData,
model: modelList[0].model
model: defaultModel
});
}
});
}, [modelList, model, defaultModel, onChange]);
const {
isOpen: isOpenAIChatSetting,

View File

@@ -65,17 +65,15 @@ const DatasetParamsModal = ({
const theme = useTheme();
const { toast } = useToast();
const { teamPlanStatus } = useUserStore();
const { reRankModelList, llmModelList } = useSystemStore();
const { reRankModelList, llmModelList, defaultModels } = useSystemStore();
const [refresh, setRefresh] = useState(false);
const [currentTabType, setCurrentTabType] = useState(SearchSettingTabEnum.searchMode);
const chatModelSelectList = (() =>
llmModelList
.filter((model) => model.usedInQueryExtension)
.map((item) => ({
value: item.model,
label: item.name
})))();
llmModelList.map((item) => ({
value: item.model,
label: item.name
})))();
const { register, setValue, getValues, handleSubmit, watch } = useForm<DatasetParamsProps>({
defaultValues: {
@@ -84,7 +82,7 @@ const DatasetParamsModal = ({
searchMode,
usingReRank: !!usingReRank && teamPlanStatus?.standardConstants?.permissionReRank !== false,
datasetSearchUsingExtensionQuery,
datasetSearchExtensionModel: datasetSearchExtensionModel || chatModelSelectList[0]?.value,
datasetSearchExtensionModel: datasetSearchExtensionModel || defaultModels.llm?.model,
datasetSearchExtensionBg
}
});

View File

@@ -1,6 +1,6 @@
import MyIcon from '@fastgpt/web/components/common/Icon';
import MyTooltip from '@fastgpt/web/components/common/MyTooltip';
import { Box, Button, Flex, ModalBody, useDisclosure, Image } from '@chakra-ui/react';
import { Box, Button, Flex, ModalBody, useDisclosure, Image, HStack } from '@chakra-ui/react';
import React, { useCallback, useMemo } from 'react';
import { useTranslation } from 'next-i18next';
import { TTSTypeEnum } from '@/web/core/app/constants';
@@ -9,13 +9,15 @@ import { useAudioPlay } from '@/web/common/utils/voice';
import { useSystemStore } from '@/web/common/system/useSystemStore';
import MyModal from '@fastgpt/web/components/common/MyModal';
import MySlider from '@/components/Slider';
import MySelect from '@fastgpt/web/components/common/MySelect';
import { defaultTTSConfig } from '@fastgpt/global/core/app/constants';
import ChatFunctionTip from './Tip';
import FormLabel from '@fastgpt/web/components/common/MyBox/FormLabel';
import MyImage from '@fastgpt/web/components/common/Image/MyImage';
import { useContextSelector } from 'use-context-selector';
import { AppContext } from '@/pages/app/detail/components/context';
import { AppContext } from '@/pageComponents/app/detail/context';
import Avatar from '@fastgpt/web/components/common/Avatar';
import { getModelProvider } from '@fastgpt/global/core/ai/provider';
import MultipleRowSelect from '@fastgpt/web/components/common/MySelect/MultipleRowSelect';
const TTSSelect = ({
value = defaultTTSConfig,
@@ -25,33 +27,62 @@ const TTSSelect = ({
onChange: (e: AppTTSConfigType) => void;
}) => {
const { t } = useTranslation();
const { audioSpeechModelList } = useSystemStore();
const { ttsModelList } = useSystemStore();
const { isOpen, onOpen, onClose } = useDisclosure();
const appId = useContextSelector(AppContext, (v) => v.appId);
const list = useMemo(
const selectorList = useMemo(
() => [
{ label: t('common:core.app.tts.Close'), value: TTSTypeEnum.none },
{ label: t('common:core.app.tts.Web'), value: TTSTypeEnum.web },
...audioSpeechModelList.map((item) => item?.voices || []).flat()
{ label: t('app:tts_close'), value: TTSTypeEnum.none, children: [] },
{ label: t('app:tts_browser'), value: TTSTypeEnum.web, children: [] },
...ttsModelList.map((model) => {
const providerData = getModelProvider(model.provider);
return {
label: (
<HStack>
<Avatar borderRadius={'0'} w={'1.25rem'} src={providerData.avatar} />
<Box>{t(providerData.name)}</Box>
</HStack>
),
value: model.model,
children: model.voices.map((voice) => ({
label: voice.label,
value: voice.value
}))
};
})
],
[audioSpeechModelList, t]
[ttsModelList, t]
);
const formatValue = useMemo(() => {
if (!value || !value.type) {
return TTSTypeEnum.none;
return [TTSTypeEnum.none, undefined];
}
if (value.type === TTSTypeEnum.none || value.type === TTSTypeEnum.web) {
return value.type;
return [value.type, undefined];
}
return value.voice;
return [value.model, value.voice];
}, [value]);
const formLabel = useMemo(
() => list.find((item) => item.value === formatValue)?.label || t('common:common.UnKnow'),
[formatValue, list, t]
);
const formLabel = useMemo(() => {
const provider = selectorList.find((item) => item.value === formatValue[0]) || selectorList[0];
const voice = provider.children.find((item) => item.value === formatValue[1]);
return (
<Box maxW={'220px'} className="textEllipsis">
{voice ? (
<Flex alignItems={'center'}>
<Box>{provider.label}</Box>
<Box>-</Box>
<Box>{voice.label}</Box>
</Flex>
) : (
provider.label
)}
</Box>
);
}, [formatValue, selectorList, t]);
const { playAudioByText, cancelAudio, audioLoading, audioPlaying } = useAudioPlay({
appId,
@@ -59,25 +90,20 @@ const TTSSelect = ({
});
const onclickChange = useCallback(
(e: string) => {
if (e === TTSTypeEnum.none || e === TTSTypeEnum.web) {
onChange({ type: e as `${TTSTypeEnum}` });
(e: string[]) => {
console.log(e, '-=');
if (e[0] === TTSTypeEnum.none || e[0] === TTSTypeEnum.web) {
onChange({ type: e[0] });
} else {
const audioModel = audioSpeechModelList.find((item) =>
item.voices?.find((voice) => voice.value === e)
);
if (!audioModel) {
return;
}
onChange({
...value,
type: TTSTypeEnum.model,
model: audioModel.model,
voice: e
model: e[0],
voice: e[1]
});
}
},
[audioSpeechModelList, onChange, value]
[ttsModelList, onChange, value]
);
const onCloseTTSModal = useCallback(() => {
@@ -113,7 +139,13 @@ const TTSSelect = ({
<ModalBody px={[5, 16]} py={[4, 8]}>
<Flex justifyContent={'space-between'} alignItems={'center'}>
<FormLabel>{t('common:core.app.tts.Speech model')}</FormLabel>
<MySelect w={'220px'} value={formatValue} list={list} onchange={onclickChange} />
<MultipleRowSelect
rowMinWidth="160px"
label={<Box minW={'150px'}>{formLabel}</Box>}
value={formatValue}
list={selectorList}
onSelect={onclickChange}
/>
</Flex>
<Flex mt={8} justifyContent={'space-between'}>
<FormLabel>{t('common:core.app.tts.Speech speed')}</FormLabel>
@@ -135,7 +167,7 @@ const TTSSelect = ({
}}
/>
</Flex>
{formatValue !== TTSTypeEnum.none && (
{formatValue[0] !== TTSTypeEnum.none && (
<Flex mt={10} justifyContent={'end'}>
{audioPlaying ? (
<Flex>

View File

@@ -30,7 +30,7 @@ import { formatEditorVariablePickerIcon } from '@fastgpt/global/core/workflow/ut
import ChatFunctionTip from './Tip';
import FormLabel from '@fastgpt/web/components/common/MyBox/FormLabel';
import QuestionTip from '@fastgpt/web/components/common/MyTooltip/QuestionTip';
import InputTypeConfig from '@/pages/app/detail/components/WorkflowComponents/Flow/nodes/NodePluginIO/InputTypeConfig';
import InputTypeConfig from '@/pageComponents/app/detail/WorkflowComponents/Flow/nodes/NodePluginIO/InputTypeConfig';
import MyIconButton from '@fastgpt/web/components/common/Icon/button';
export const defaultVariable: VariableItemType = {

View File

@@ -25,7 +25,7 @@ const WelcomeTextConfig = (props: TextareaProps) => {
mt={1.5}
rows={6}
fontSize={'sm'}
bg={'white'}
bg={'myGray.50'}
minW={'384px'}
placeholder={t('common:core.app.tip.welcomeTextTip')}
autoHeight

View File

@@ -107,7 +107,7 @@ const ChatInput = ({
);
/* whisper init */
const { whisperModel } = useSystemStore();
const { sttModelList } = useSystemStore();
const canvasRef = useRef<HTMLCanvasElement>(null);
const {
isSpeaking,
@@ -293,7 +293,7 @@ const ChatInput = ({
/>
<Flex alignItems={'center'} position={'absolute'} right={[2, 4]} bottom={['10px', '12px']}>
{/* voice-input */}
{whisperConfig.open && !inputValue && !isChatting && !!whisperModel && (
{whisperConfig.open && !inputValue && !isChatting && sttModelList.length > 0 && (
<>
<canvas
ref={canvasRef}
@@ -431,7 +431,7 @@ const ChatInput = ({
stopSpeak,
t,
whisperConfig.open,
whisperModel
sttModelList
]
);

View File

@@ -1,4 +1,4 @@
import { useCopyData } from '@/web/common/hooks/useCopyData';
import { useCopyData } from '@fastgpt/web/hooks/useCopyData';
import { Flex, FlexProps, css, useTheme } from '@chakra-ui/react';
import { ChatSiteItemType } from '@fastgpt/global/core/chat/type';
import MyTooltip from '@fastgpt/web/components/common/MyTooltip';

View File

@@ -15,7 +15,7 @@ import FilesBlock from './FilesBox';
import { ChatBoxContext } from '../Provider';
import { useContextSelector } from 'use-context-selector';
import AIResponseBox from '../../../components/AIResponseBox';
import { useCopyData } from '@/web/common/hooks/useCopyData';
import { useCopyData } from '@fastgpt/web/hooks/useCopyData';
import MyIcon from '@fastgpt/web/components/common/Icon';
import MyTooltip from '@fastgpt/web/components/common/MyTooltip';
import { useTranslation } from 'next-i18next';

View File

@@ -10,7 +10,7 @@ import { AdminFbkType } from '@fastgpt/global/core/chat/type.d';
import SelectCollections from '@/web/core/dataset/components/SelectCollections';
import EmptyTip from '@fastgpt/web/components/common/EmptyTip';
const InputDataModal = dynamic(() => import('@/pages/dataset/detail/components/InputDataModal'));
const InputDataModal = dynamic(() => import('@/pageComponents/dataset/detail/InputDataModal'));
export type AdminMarkType = {
feedbackDataId?: string;

View File

@@ -201,6 +201,7 @@ const ChatBox = ({
({
event,
text = '',
reasoningText,
status,
name,
tool,
@@ -225,6 +226,25 @@ const ChatBox = ({
status,
moduleName: name
};
} else if (event === SseResponseEventEnum.answer && reasoningText) {
if (lastValue.type === ChatItemValueTypeEnum.reasoning && lastValue.reasoning) {
lastValue.reasoning.content += reasoningText;
return {
...item,
value: item.value.slice(0, -1).concat(lastValue)
};
} else {
const val: AIChatItemValueItemType = {
type: ChatItemValueTypeEnum.reasoning,
reasoning: {
content: reasoningText
}
};
return {
...item,
value: item.value.concat(val)
};
}
} else if (
(event === SseResponseEventEnum.answer || event === SseResponseEventEnum.fastAnswer) &&
text

View File

@@ -6,6 +6,7 @@ import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/t
export type generatingMessageProps = {
event: SseResponseEventEnum;
text?: string;
reasoningText?: string;
name?: string;
status?: 'running' | 'finish';
tool?: ToolModuleResponseItemType;

View File

@@ -8,6 +8,7 @@ import {
Box,
Button,
Flex,
HStack,
Textarea
} from '@chakra-ui/react';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
@@ -139,6 +140,58 @@ ${toolResponse}`}
},
(prevProps, nextProps) => isEqual(prevProps, nextProps)
);
const RenderResoningContent = React.memo(function RenderResoningContent({
content,
isChatting,
isLastResponseValue
}: {
content: string;
isChatting: boolean;
isLastResponseValue: boolean;
}) {
const { t } = useTranslation();
const showAnimation = isChatting && isLastResponseValue;
return (
<Accordion allowToggle defaultIndex={isLastResponseValue ? 0 : undefined}>
<AccordionItem borderTop={'none'} borderBottom={'none'}>
<AccordionButton
w={'auto'}
bg={'white'}
borderRadius={'md'}
borderWidth={'1px'}
borderColor={'myGray.200'}
boxShadow={'1'}
pl={3}
pr={2.5}
py={1}
_hover={{
bg: 'auto'
}}
>
<HStack mr={2} spacing={1}>
<MyIcon name={'core/chat/think'} w={'0.85rem'} />
<Box fontSize={'sm'}>{t('chat:ai_reasoning')}</Box>
</HStack>
{showAnimation && <MyIcon name={'common/loading'} w={'0.85rem'} />}
<AccordionIcon color={'myGray.600'} ml={5} />
</AccordionButton>
<AccordionPanel
py={0}
pr={0}
pl={3}
mt={2}
borderLeft={'2px solid'}
borderColor={'myGray.300'}
color={'myGray.500'}
>
<Markdown source={content} showAnimation={showAnimation} />
</AccordionPanel>
</AccordionItem>
</Accordion>
);
});
const RenderUserSelectInteractive = React.memo(function RenderInteractive({
interactive
}: {
@@ -290,6 +343,14 @@ const AIResponseBox = ({ value, isLastResponseValue, isChatting }: props) => {
return (
<RenderText showAnimation={isChatting && isLastResponseValue} text={value.text.content} />
);
if (value.type === ChatItemValueTypeEnum.reasoning && value.reasoning)
return (
<RenderResoningContent
isChatting={isChatting}
isLastResponseValue={isLastResponseValue}
content={value.reasoning.content}
/>
);
if (value.type === ChatItemValueTypeEnum.tool && value.tools)
return <RenderTool showAnimation={isChatting} tools={value.tools} />;
if (value.type === ChatItemValueTypeEnum.interactive && value.interactive) {

View File

@@ -12,7 +12,7 @@ import { SearchScoreTypeEnum, SearchScoreTypeMap } from '@fastgpt/global/core/da
import type { readCollectionSourceBody } from '@/pages/api/core/dataset/collection/read';
import Markdown from '@/components/Markdown';
const InputDataModal = dynamic(() => import('@/pages/dataset/detail/components/InputDataModal'));
const InputDataModal = dynamic(() => import('@/pageComponents/dataset/detail/InputDataModal'));
type ScoreItemType = SearchDataResponseItemType['score'][0];
const scoreTheme: Record<