File input (#2270)

* doc

* feat: file upload config

* perf: chat box file params

* feat: markdown show file

* feat: chat file store and clear

* perf: read file contentType

* feat: llm vision config

* feat: file url output

* perf: plugin error text

* perf: image load

* feat: ai chat document

* perf: file block ui

* feat: read file node

* feat: file read response field

* feat: simple mode support read files

* feat: tool call

* feat: read file histories

* perf: select file

* perf: select file config

* i18n

* i18n

* fix: ts; feat: tool response preview result
This commit is contained in:
Archer
2024-08-06 10:00:22 +08:00
committed by GitHub
parent 10dcdb5491
commit e36d9d794f
121 changed files with 2600 additions and 1142 deletions

View File

@@ -41,8 +41,11 @@ const AIChatSettingsModal = ({
});
const model = watch('model');
const showResponseAnswerText = watch(NodeInputKeyEnum.aiChatIsResponseText) !== undefined;
const showVisionSwitch = watch(NodeInputKeyEnum.aiChatVision) !== undefined;
const showMaxHistoriesSlider = watch('maxHistories') !== undefined;
const useVision = watch('aiChatVision');
const selectedModel = llmModelList.find((item) => item.model === model) || llmModelList[0];
const llmSupportVision = !!selectedModel?.vision;
const tokenLimit = useMemo(() => {
return llmModelList.find((item) => item.model === model)?.maxResponse || 4096;
@@ -65,7 +68,7 @@ const AIChatSettingsModal = ({
alignItems: 'center',
fontSize: 'sm',
color: 'myGray.900',
width: ['80px', '90px']
width: ['6rem', '8rem']
};
return (
@@ -110,26 +113,24 @@ const AIChatSettingsModal = ({
</Box>
</Flex>
{feConfigs && (
<Flex mt={8}>
<Flex mt={6}>
<Box {...LabelStyles} mr={2}>
{t('common:core.ai.Ai point price')}
</Box>
<Box flex={1} ml={'10px'}>
{t('support.wallet.Ai point every thousand tokens', {
<Box flex={1}>
{t('common:support.wallet.Ai point every thousand tokens', {
points: selectedModel?.charsPointsPrice || 0
})}
</Box>
</Flex>
)}
<Flex mt={8}>
<Flex mt={6}>
<Box {...LabelStyles} mr={2}>
{t('common:core.ai.Max context')}
</Box>
<Box flex={1} ml={'10px'}>
{selectedModel?.maxContext || 4096}Tokens
</Box>
<Box flex={1}>{selectedModel?.maxContext || 4096}Tokens</Box>
</Flex>
<Flex mt={8}>
<Flex mt={6}>
<Box {...LabelStyles} mr={2}>
{t('common:core.ai.Support tool')}
<QuestionTip ml={1} label={t('common:core.module.template.AI support tool tip')} />
@@ -140,11 +141,11 @@ const AIChatSettingsModal = ({
: t('common:common.not_support')}
</Box>
</Flex>
<Flex mt={8}>
<Flex mt={6}>
<Box {...LabelStyles} mr={2}>
{t('common:core.app.Temperature')}
</Box>
<Box flex={1} ml={'10px'}>
<Box flex={1} ml={1}>
<MySlider
markList={[
{ label: t('common:core.app.deterministic'), value: 0 },
@@ -161,11 +162,11 @@ const AIChatSettingsModal = ({
/>
</Box>
</Flex>
<Flex mt={8}>
<Flex mt={6}>
<Box {...LabelStyles} mr={2}>
{t('common:core.app.Max tokens')}
</Box>
<Box flex={1} ml={'10px'}>
<Box flex={1}>
<MySlider
markList={[
{ label: '100', value: 100 },
@@ -184,11 +185,11 @@ const AIChatSettingsModal = ({
</Box>
</Flex>
{showMaxHistoriesSlider && (
<Flex mt={8}>
<Flex mt={6}>
<Box {...LabelStyles} mr={2}>
{t('common:core.app.Max histories')}
</Box>
<Box flex={1} ml={'10px'}>
<Box flex={1}>
<MySlider
markList={[
{ label: 0, value: 0 },
@@ -207,7 +208,7 @@ const AIChatSettingsModal = ({
</Flex>
)}
{showResponseAnswerText && (
<Flex mt={8} alignItems={'center'}>
<Flex mt={6} alignItems={'center'}>
<Box {...LabelStyles}>
{t('common:core.app.Ai response')}
<QuestionTip
@@ -215,7 +216,7 @@ const AIChatSettingsModal = ({
label={t('common:core.module.template.AI response switch tip')}
></QuestionTip>
</Box>
<Box flex={1} ml={'10px'}>
<Box flex={1}>
<Switch
isChecked={getValues(NodeInputKeyEnum.aiChatIsResponseText)}
onChange={(e) => {
@@ -227,6 +228,29 @@ const AIChatSettingsModal = ({
</Box>
</Flex>
)}
{showVisionSwitch && (
<Flex mt={6} alignItems={'center'}>
<Box {...LabelStyles}>
{t('app:llm_use_vision')}
<QuestionTip ml={1} label={t('app:llm_use_vision_tip')}></QuestionTip>
</Box>
<Box flex={1}>
{llmSupportVision ? (
<Switch
isChecked={useVision}
onChange={(e) => {
const value = e.target.checked;
setValue(NodeInputKeyEnum.aiChatVision, value);
}}
/>
) : (
<Box fontSize={'sm'} color={'myGray.500'}>
{t('app:llm_not_support_vision')}
</Box>
)}
</Box>
</Flex>
)}
</ModalBody>
<ModalFooter>
<Button variant={'whiteBase'} onClick={onClose}>

View File

@@ -1,13 +1,15 @@
import React, { useEffect } from 'react';
import React from 'react';
import { useSystemStore } from '@/web/common/system/useSystemStore';
import { LLMModelTypeEnum, llmModelTypeFilterMap } from '@fastgpt/global/core/ai/constants';
import { Box, Button, Flex, css, useDisclosure } from '@chakra-ui/react';
import { Box, Button, css, useDisclosure } from '@chakra-ui/react';
import type { SettingAIDataType } from '@fastgpt/global/core/app/type.d';
import AISettingModal from '@/components/core/ai/AISettingModal';
import Avatar from '@fastgpt/web/components/common/Avatar';
import { HUGGING_FACE_ICON } from '@fastgpt/global/common/system/constants';
import MyTooltip from '@fastgpt/web/components/common/MyTooltip';
import { useTranslation } from 'next-i18next';
import MyIcon from '@fastgpt/web/components/common/Icon';
import { useMount } from 'ahooks';
type Props = {
llmModelType?: `${LLMModelTypeEnum}`;
@@ -37,14 +39,15 @@ const SettingLLMModel = ({ llmModelType = LLMModelTypeEnum.all, defaultData, onC
onClose: onCloseAIChatSetting
} = useDisclosure();
useEffect(() => {
// Set default model
useMount(() => {
if (!model && modelList.length > 0) {
onChange({
...defaultData,
model: modelList[0].model
});
}
}, []);
});
return (
<Box
@@ -71,10 +74,13 @@ const SettingLLMModel = ({ llmModelType = LLMModelTypeEnum.all, defaultData, onC
w={'18px'}
/>
}
rightIcon={<MyIcon name={'common/select'} w={'1rem'} />}
pl={4}
onClick={onOpenAIChatSetting}
>
{selectedModel?.name}
<Box flex={1} textAlign={'left'}>
{selectedModel?.name}
</Box>
</Button>
</MyTooltip>
{isOpenAIChatSetting && (