New file upload (#3058)

* feat: toolNode aiNode readFileNode adapt new version

* update docker-compose

* update tip

* feat: adapt new file version

* perf: file input

* fix: ts
Archer
2024-11-04 10:44:45 +08:00
committed by archer
parent 9e8138e55f
commit dc1119ca90
55 changed files with 1159 additions and 488 deletions


@@ -7,6 +7,20 @@ toc: true
weight: 811
---
## Upgrade Guide
### 1. Back up your data
### 2. Update the images
- Update the FastGPT image tag to v4.8.13-alpha
- Update the FastGPT admin image (fastgpt-pro) tag to v4.8.13-alpha
- The Sandbox image can be left as is
### 3. Adjust the file-upload orchestration
The legacy file-upload orchestration is still supported, but the compatibility code will be removed within the next two releases, so please migrate your orchestration to the new file-upload logic as soon as possible. In particular, files are no longer passed to nested applications automatically; the files to pass must be specified explicitly, as sketched below.
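For illustration only, here is a hedged sketch of what explicitly wiring files into a nested app's `fileUrlList` input could look like. The enum and type names come from this release; the node id, output key, and reference-value format are assumptions, not values taken from the release.

```ts
import { NodeInputKeyEnum, WorkflowIOValueTypeEnum } from '@fastgpt/global/core/workflow/constants';
import { FlowNodeInputTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import type { FlowNodeInputItemType } from '@fastgpt/global/core/workflow/type/io';

// Hypothetical example: pass the parent's user files into a nested app explicitly.
const nestedAppFileInput: FlowNodeInputItemType = {
  key: NodeInputKeyEnum.fileUrlList,
  label: 'workflow.user_file_input',
  renderTypeList: [FlowNodeInputTypeEnum.reference], // wired by reference, no longer auto-passed
  valueType: WorkflowIOValueTypeEnum.arrayString,
  required: true,
  // Assumed reference shape: [sourceNodeId, outputKey] pointing at the parent's file output.
  value: ['workflowStartNode', 'userFiles']
};
```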
## Changelog
1. New - Array variable selection supports multi-select: you can select multiple arrays or the matching single data types, and they are merged automatically in selection order.
@@ -24,18 +38,4 @@ weight: 811
13. Improvement - Workflow context split out, improving performance.
14. Improvement - Voice playback: browsers without mediaSource support can wait for the audio to be fully generated before playing it.
15. Fix - Dockerfile pnpm install now supports a proxy.
16. Fix - BI chart generation could not write files.
## Upgrade Guide
### 1. Back up your data
### 2. Update the images
- Update the FastGPT image tag to v4.8.13-alpha
- Update the FastGPT admin image (fastgpt-pro) tag to v4.8.13-alpha
- The Sandbox image can be left as is
### 3. Adjust the file-upload orchestration
The legacy file-upload orchestration is still supported, but the compatibility code will be removed within the next two releases, so please migrate your orchestration to the new file-upload logic as soon as possible. In particular, files are no longer passed to nested applications automatically; the files to pass must be specified explicitly.
16. Fix - BI chart generation could not write files.


@@ -139,6 +139,8 @@ services:
- OPENAI_BASE_URL=http://oneapi:3000/v1
# API key for the AI model. The OneAPI quick-start default key is prefilled here; be sure to change it once testing passes.
- CHAT_API_KEY=sk-fastgpt
# Whether to convert images to base64 before passing them to the model. Set to true for local development, or when an intranet deployment uses shared models.
- MULTIPLE_DATA_TO_BASE64=false
# Maximum number of database connections
- DB_MAX_LINK=30
# Login credential secret


@@ -97,6 +97,8 @@ services:
- OPENAI_BASE_URL=http://oneapi:3000/v1
# API key for the AI model. The OneAPI quick-start default key is prefilled here; be sure to change it once testing passes.
- CHAT_API_KEY=sk-fastgpt
# Whether to convert images to base64 before passing them to the model. Set to true for local development, or when an intranet deployment uses shared models.
- MULTIPLE_DATA_TO_BASE64=false
# Maximum number of database connections
- DB_MAX_LINK=30
# Login credential secret


@@ -77,6 +77,8 @@ services:
- OPENAI_BASE_URL=http://oneapi:3000/v1
# API key for the AI model. The OneAPI quick-start default key is prefilled here; be sure to change it once testing passes.
- CHAT_API_KEY=sk-fastgpt
# Whether to convert images to base64 before passing them to the model. Set to true for local development, or when an intranet deployment uses shared models.
- MULTIPLE_DATA_TO_BASE64=false
# Maximum number of database connections
- DB_MAX_LINK=30
# Login credential secret


@@ -19,3 +19,5 @@ export const bucketNameMap = {
export const ReadFileBaseUrl = `${process.env.FE_DOMAIN || ''}/api/common/file/read`;
export const documentFileType = '.txt, .docx, .csv, .xlsx, .pdf, .md, .html, .pptx';
export const imageFileType =
'.jpg, .jpeg, .png, .gif, .bmp, .webp, .svg, .tiff, .tif, .ico, .heic, .heif, .avif';


@@ -1,4 +1,7 @@
import { detect } from 'jschardet';
import { documentFileType, imageFileType } from './constants';
import { ChatFileTypeEnum } from '../../core/chat/constants';
import { UserChatItemValueItemType } from '../../core/chat/type';
export const formatFileSize = (bytes: number): string => {
if (bytes === 0) return '0 B';
@@ -13,3 +16,39 @@ export const formatFileSize = (bytes: number): string => {
export const detectFileEncoding = (buffer: Buffer) => {
return detect(buffer.slice(0, 200))?.encoding?.toLocaleLowerCase();
};
// Url => user upload file type
export const parseUrlToFileType = (url: string): UserChatItemValueItemType['file'] | undefined => {
const parseUrl = new URL(url, 'https://localhost:3000');
const filename = (() => {
// Old version file url: https://xxx.com/file/read?filename=xxx.pdf
const filenameQuery = parseUrl.searchParams.get('filename');
if (filenameQuery) return filenameQuery;
// Common file https://xxx.com/xxx.pdf?xxxx=xxx
const pathname = parseUrl.pathname;
if (pathname) return pathname.split('/').pop();
})();
if (!filename) return;
const extension = filename.split('.').pop()?.toLowerCase() || '';
if (!extension) return;
if (documentFileType.includes(extension)) {
return {
type: ChatFileTypeEnum.file,
name: filename,
url
};
}
if (imageFileType.includes(extension)) {
return {
type: ChatFileTypeEnum.image,
name: filename,
url
};
}
};
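A quick usage sketch of parseUrlToFileType (the URLs are made up; the expected results follow from the extension checks against documentFileType and imageFileType above):

```ts
// Old-style upload URL: the filename comes from the `filename` query parameter.
parseUrlToFileType('https://example.com/api/common/file/read?filename=report.pdf');
// -> { type: ChatFileTypeEnum.file, name: 'report.pdf', url: '…' }

// Plain file URL: the filename comes from the last pathname segment.
parseUrlToFileType('https://example.com/assets/logo.png?token=abc');
// -> { type: ChatFileTypeEnum.image, name: 'logo.png', url: '…' }

// Unsupported extension: the helper returns undefined.
parseUrlToFileType('https://example.com/archive.zip'); // -> undefined
```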


@@ -30,7 +30,8 @@ export const getChatTitleFromChatMessage = (message?: ChatItemType, defaultValue
// Keep the first n and last n characters
export const getHistoryPreview = (
completeMessages: ChatItemType[],
size = 100
size = 100,
useVision = false
): {
obj: `${ChatRoleEnum}`;
value: string;
@@ -48,7 +49,8 @@ export const getHistoryPreview = (
item.value
?.map((item) => {
if (item?.text?.content) return item?.text?.content;
if (item.file?.type === 'image') return 'Input an image';
if (item.file?.type === 'image' && useVision)
return `![Input an image](${item.file.url.slice(0, 100)}...)`;
return '';
})
.filter(Boolean)
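A small illustration of the new useVision flag (the history value shape is simplified with a cast and is for illustration only):

```ts
const historyMessages = [
  {
    obj: ChatRoleEnum.Human,
    value: [
      { type: 'file', file: { type: 'image', name: 'a.png', url: 'https://example.com/a.png' } },
      { type: 'text', text: { content: 'What is in this picture?' } }
    ]
  }
] as unknown as ChatItemType[]; // simplified shape for the sketch

// With useVision = true the image survives as a truncated markdown link, e.g.
//   '![Input an image](https://example.com/a.png...)'
getHistoryPreview(historyMessages, 100, true);
// With useVision = false the image item is dropped from the preview
// (previously it was rendered as the literal text 'Input an image').
getHistoryPreview(historyMessages, 100, false);
```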


@@ -27,7 +27,9 @@ export enum FlowNodeInputTypeEnum { // render ui
settingDatasetQuotePrompt = 'settingDatasetQuotePrompt',
hidden = 'hidden',
custom = 'custom'
custom = 'custom',
fileSelect = 'fileSelect'
}
export const FlowNodeInputMap: Record<
FlowNodeInputTypeEnum,
@@ -85,6 +87,9 @@ export const FlowNodeInputMap: Record<
},
[FlowNodeInputTypeEnum.textarea]: {
icon: 'core/workflow/inputType/textarea'
},
[FlowNodeInputTypeEnum.fileSelect]: {
icon: 'core/workflow/inputType/file'
}
};
@@ -137,43 +142,43 @@ export enum FlowNodeTypeEnum {
// node IO value type
export const FlowValueTypeMap = {
[WorkflowIOValueTypeEnum.string]: {
label: 'string',
label: 'String',
value: WorkflowIOValueTypeEnum.string
},
[WorkflowIOValueTypeEnum.number]: {
label: 'number',
label: 'Number',
value: WorkflowIOValueTypeEnum.number
},
[WorkflowIOValueTypeEnum.boolean]: {
label: 'boolean',
label: 'Boolean',
value: WorkflowIOValueTypeEnum.boolean
},
[WorkflowIOValueTypeEnum.object]: {
label: 'object',
label: 'Object',
value: WorkflowIOValueTypeEnum.object
},
[WorkflowIOValueTypeEnum.arrayString]: {
label: 'array<string>',
label: 'Array<string>',
value: WorkflowIOValueTypeEnum.arrayString
},
[WorkflowIOValueTypeEnum.arrayNumber]: {
label: 'array<number>',
label: 'Array<number>',
value: WorkflowIOValueTypeEnum.arrayNumber
},
[WorkflowIOValueTypeEnum.arrayBoolean]: {
label: 'array<boolean>',
label: 'Array<boolean>',
value: WorkflowIOValueTypeEnum.arrayBoolean
},
[WorkflowIOValueTypeEnum.arrayObject]: {
label: 'array<object>',
label: 'Array<object>',
value: WorkflowIOValueTypeEnum.arrayObject
},
[WorkflowIOValueTypeEnum.arrayAny]: {
label: 'array',
label: 'Array',
value: WorkflowIOValueTypeEnum.arrayAny
},
[WorkflowIOValueTypeEnum.any]: {
label: 'any',
label: 'Any',
value: WorkflowIOValueTypeEnum.any
},
[WorkflowIOValueTypeEnum.chatHistory]: {


@@ -216,5 +216,7 @@ export type AIChatNodeProps = {
[NodeInputKeyEnum.aiChatQuoteTemplate]?: string;
[NodeInputKeyEnum.aiChatQuotePrompt]?: string;
[NodeInputKeyEnum.aiChatVision]?: boolean;
[NodeInputKeyEnum.stringQuoteText]?: string;
[NodeInputKeyEnum.fileUrlList]?: string[];
};


@@ -75,10 +75,17 @@ export const Input_Template_Text_Quote: FlowNodeInputItemType = {
description: i18nT('app:document_quote_tip'),
valueType: WorkflowIOValueTypeEnum.string
};
export const Input_Template_File_Link_Prompt: FlowNodeInputItemType = {
key: NodeInputKeyEnum.fileUrlList,
renderTypeList: [FlowNodeInputTypeEnum.reference, FlowNodeInputTypeEnum.input],
label: i18nT('app:file_quote_link'),
debugLabel: i18nT('app:file_quote_link'),
valueType: WorkflowIOValueTypeEnum.arrayString
};
export const Input_Template_File_Link: FlowNodeInputItemType = {
key: NodeInputKeyEnum.fileUrlList,
renderTypeList: [FlowNodeInputTypeEnum.reference],
required: true,
label: i18nT('app:workflow.user_file_input'),
debugLabel: i18nT('app:workflow.user_file_input'),
description: i18nT('app:workflow.user_file_input_desc'),


@@ -17,7 +17,8 @@ import {
Input_Template_History,
Input_Template_System_Prompt,
Input_Template_UserChatInput,
Input_Template_Text_Quote
Input_Template_Text_Quote,
Input_Template_File_Link_Prompt
} from '../../input';
import { chatNodeSystemPromptTip, systemPromptTip } from '../../tip';
import { getHandleConfig } from '../../utils';
@@ -55,7 +56,7 @@ export const AiChatModule: FlowNodeTemplateType = {
showStatus: true,
isTool: true,
courseUrl: '/docs/workflow/modules/ai_chat/',
version: '481',
version: '4813',
inputs: [
Input_Template_SettingAiModel,
// --- settings modal
@@ -100,7 +101,7 @@ export const AiChatModule: FlowNodeTemplateType = {
},
Input_Template_History,
Input_Template_Dataset_Quote,
Input_Template_Text_Quote,
Input_Template_File_Link_Prompt,
{ ...Input_Template_UserChatInput, toolDescription: i18nT('workflow:user_question') }
],


@@ -23,7 +23,7 @@ export const ReadFilesNode: FlowNodeTemplateType = {
name: i18nT('app:workflow.read_files'),
intro: i18nT('app:workflow.read_files_tip'),
showStatus: true,
version: '489',
version: '4812',
isTool: true,
inputs: [
{


@@ -20,6 +20,7 @@ import { chatNodeSystemPromptTip, systemPromptTip } from '../tip';
import { LLMModelTypeEnum } from '../../../ai/constants';
import { getHandleConfig } from '../utils';
import { i18nT } from '../../../../../web/i18n/utils';
import { Input_Template_File_Link_Prompt } from '../input';
export const ToolModule: FlowNodeTemplateType = {
id: FlowNodeTypeEnum.tools,
@@ -32,7 +33,7 @@ export const ToolModule: FlowNodeTemplateType = {
intro: i18nT('workflow:template.tool_call_intro'),
showStatus: true,
courseUrl: '/docs/workflow/modules/tool/',
version: '481',
version: '4813',
inputs: [
{
...Input_Template_SettingAiModel,
@@ -57,7 +58,7 @@ export const ToolModule: FlowNodeTemplateType = {
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.boolean,
value: true
value: false
},
{
@@ -67,6 +68,7 @@ export const ToolModule: FlowNodeTemplateType = {
placeholder: chatNodeSystemPromptTip
},
Input_Template_History,
Input_Template_File_Link_Prompt,
Input_Template_UserChatInput
],
outputs: [


@@ -56,6 +56,11 @@ export type FlowNodeInputItemType = InputComponentPropsType & {
canEdit?: boolean; // dynamic inputs
isPro?: boolean; // Pro version field
isToolOutput?: boolean;
// file
canSelectFile?: boolean;
canSelectImg?: boolean;
maxFiles?: number;
};
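A hedged example of a plugin input that uses the new fileSelect render type together with these fields (the key, label, and limits are invented; the valueType is an assumption based on how the URLs are passed downstream as strings):

```ts
import { WorkflowIOValueTypeEnum } from '@fastgpt/global/core/workflow/constants';
import { FlowNodeInputTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import type { FlowNodeInputItemType } from '@fastgpt/global/core/workflow/type/io';

const exampleFileInput: FlowNodeInputItemType = {
  key: 'contractFiles',
  label: 'Contract files',
  renderTypeList: [FlowNodeInputTypeEnum.fileSelect],
  valueType: WorkflowIOValueTypeEnum.arrayString, // assumed
  required: true,
  canSelectFile: true, // allow documents
  canSelectImg: false, // disallow images
  maxFiles: 5
};
```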
export type FlowNodeOutputItemType = {


@@ -32,6 +32,7 @@ import { IfElseResultEnum } from './template/system/ifElse/constant';
import { RuntimeNodeItemType } from './runtime/type';
import { getReferenceVariableValue } from './runtime/utils';
import {
Input_Template_File_Link,
Input_Template_History,
Input_Template_Stream_MODE,
Input_Template_UserChatInput
@@ -261,8 +262,10 @@ export const appData2FlowNodeIO = ({
inputs: [
Input_Template_Stream_MODE,
Input_Template_History,
...(chatConfig?.fileSelectConfig?.canSelectFile || chatConfig?.fileSelectConfig?.canSelectImg
? [Input_Template_File_Link]
: []),
Input_Template_UserChatInput,
// ...(showFileLink ? [Input_Template_File_Link] : []),
...variableInput
],
outputs: [


@@ -109,7 +109,7 @@ export const loadRequestMessages = async ({
}
return Promise.all(
messages.map(async (item) => {
if (item.type === 'image_url') {
if (item.type === 'image_url' && process.env.MULTIPLE_DATA_TO_BASE64 === 'true') {
// Remove url origin
const imgUrl = (() => {
if (origin && item.image_url.url.startsWith(origin)) {
@@ -149,7 +149,7 @@ export const loadRequestMessages = async ({
};
// Split question text and image
const parseStringWithImages = (input: string): ChatCompletionContentPart[] => {
if (!useVision) {
if (!useVision || input.length > 500) {
return [{ type: 'text', text: input || '' }];
}
@@ -170,8 +170,8 @@ export const loadRequestMessages = async ({
});
});
// Too many images or too long text, return text
if (httpsImages.length > 4 || input.length > 1000) {
// Too many images return text
if (httpsImages.length > 4) {
return [{ type: 'text', text: input || '' }];
}
@@ -179,7 +179,7 @@ export const loadRequestMessages = async ({
result.push({ type: 'text', text: input });
return result;
};
// Parse user content(text and img)
// Parse user content(text and img) Store history => api messages
const parseUserContent = async (content: string | ChatCompletionContentPart[]) => {
if (typeof content === 'string') {
return loadImageToBase64(parseStringWithImages(content));
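To make the new guards easier to follow, here is a minimal re-implementation sketch. It is illustrative only: the real parseStringWithImages also extracts the image links out of the text, and base64 conversion is applied separately, only when MULTIPLE_DATA_TO_BASE64 is 'true'.

```ts
type ContentPart =
  | { type: 'text'; text: string }
  | { type: 'image_url'; image_url: { url: string } };

// Sketch of the decision rules introduced in this commit.
const sketchParseStringWithImages = (
  input: string,
  useVision: boolean,
  httpsImages: string[] // image links already found in the input
): ContentPart[] => {
  // New: long questions (> 500 chars) skip image parsing entirely.
  if (!useVision || input.length > 500) return [{ type: 'text', text: input }];
  // Kept: more than 4 images falls back to plain text (the old 1000-char text limit is gone).
  if (httpsImages.length > 4) return [{ type: 'text', text: input }];
  return [
    ...httpsImages.map((url) => ({ type: 'image_url' as const, image_url: { url } })),
    { type: 'text', text: input }
  ];
};
```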


@@ -25,45 +25,16 @@ import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { getMultiplePrompt, Prompt_Tool_Call } from './constants';
import { filterToolResponseToPreview } from './utils';
import { InteractiveNodeResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { getFileContentFromLinks, getHistoryFileLinks } from '../../tools/readFiles';
import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
import { Prompt_DocumentQuote } from '@fastgpt/global/core/ai/prompt/AIChat';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.answerText]: string;
[DispatchNodeResponseKeyEnum.interactive]?: InteractiveNodeResponseType;
}>;
/*
Tool call: add a file prompt to the question.
Guide the LLM to call the tool.
*/
export const toolCallMessagesAdapt = ({
userInput
}: {
userInput: UserChatItemValueItemType[];
}) => {
const files = userInput.filter((item) => item.type === 'file');
if (files.length > 0) {
return userInput.map((item) => {
if (item.type === 'text') {
const filesCount = files.filter((file) => file.file?.type === 'file').length;
const imgCount = files.filter((file) => file.file?.type === 'image').length;
const text = item.text?.content || '';
return {
...item,
text: {
content: getMultiplePrompt({ fileCount: filesCount, imgCount, question: text })
}
};
}
return item;
});
}
return userInput;
};
export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
const {
node: { nodeId, name, isEntry },
@@ -71,11 +42,21 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
runtimeEdges,
histories,
query,
params: { model, systemPrompt, userChatInput, history = 6 }
requestOrigin,
chatConfig,
runningAppInfo: { teamId },
params: {
model,
systemPrompt,
userChatInput,
history = 6,
fileUrlList: fileLinks,
aiChatVision
}
} = props;
const toolModel = getLLMModel(model);
const useVision = aiChatVision && toolModel.vision;
const chatHistories = getHistories(history, histories);
const toolNodeIds = filterToolNodeIdByEdges({ nodeId, edges: runtimeEdges });
@@ -109,18 +90,43 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
}
})();
props.node.isEntry = false;
const hasReadFilesTool = toolNodes.some(
(item) => item.flowNodeType === FlowNodeTypeEnum.readFiles
);
const globalFiles = chatValue2RuntimePrompt(query).files;
const { documentQuoteText, userFiles } = await getMultiInput({
histories,
requestOrigin,
maxFiles: chatConfig?.fileSelectConfig?.maxFiles || 20,
teamId,
fileLinks,
inputFiles: globalFiles
});
const concatenateSystemPrompt = [
toolModel.defaultSystemChatPrompt,
systemPrompt,
documentQuoteText
? replaceVariable(Prompt_DocumentQuote, {
quote: documentQuoteText
})
: ''
]
.filter(Boolean)
.join('\n\n===---===---===\n\n');
const messages: ChatItemType[] = (() => {
const value: ChatItemType[] = [
...getSystemPrompt_ChatItemType(toolModel.defaultSystemChatPrompt),
...getSystemPrompt_ChatItemType(systemPrompt),
...getSystemPrompt_ChatItemType(concatenateSystemPrompt),
// Add file input prompt to histories
...chatHistories.map((item) => {
if (item.obj === ChatRoleEnum.Human) {
return {
...item,
value: toolCallMessagesAdapt({
userInput: item.value
userInput: item.value,
skip: !hasReadFilesTool
})
};
}
@@ -129,9 +135,10 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
{
obj: ChatRoleEnum.Human,
value: toolCallMessagesAdapt({
skip: !hasReadFilesTool,
userInput: runtimePrompt2ChatsValue({
text: userChatInput,
files: chatValue2RuntimePrompt(query).files
files: userFiles
})
})
}
@@ -237,7 +244,11 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
childTotalPoints: flatUsages.reduce((sum, item) => sum + item.totalPoints, 0),
model: modelName,
query: userChatInput,
historyPreview: getHistoryPreview(GPTMessages2Chats(completeMessages, false), 10000),
historyPreview: getHistoryPreview(
GPTMessages2Chats(completeMessages, false),
10000,
useVision
),
toolDetail: childToolResponse,
mergeSignId: nodeId
},
@@ -253,3 +264,88 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
[DispatchNodeResponseKeyEnum.interactive]: toolWorkflowInteractiveResponse
};
};
const getMultiInput = async ({
histories,
fileLinks,
requestOrigin,
maxFiles,
teamId,
inputFiles
}: {
histories: ChatItemType[];
fileLinks?: string[];
requestOrigin?: string;
maxFiles: number;
teamId: string;
inputFiles: UserChatItemValueItemType['file'][];
}) => {
// No file quote provided
if (!fileLinks) {
return {
documentQuoteText: '',
userFiles: inputFiles
};
}
const filesFromHistories = getHistoryFileLinks(histories);
const urls = [...fileLinks, ...filesFromHistories];
if (urls.length === 0) {
return {
documentQuoteText: '',
userFiles: []
};
}
// Get files from histories
const { text } = await getFileContentFromLinks({
// Concat fileUrlList and filesFromHistories; remove not supported files
urls,
requestOrigin,
maxFiles,
teamId
});
return {
documentQuoteText: text,
userFiles: fileLinks.map((url) => parseUrlToFileType(url))
};
};
/*
Tool call: add a file prompt to the question.
Guide the LLM to call the tool.
*/
const toolCallMessagesAdapt = ({
userInput,
skip
}: {
userInput: UserChatItemValueItemType[];
skip?: boolean;
}) => {
if (skip) return userInput;
const files = userInput.filter((item) => item.type === 'file');
if (files.length > 0) {
return userInput.map((item) => {
if (item.type === 'text') {
const filesCount = files.filter((file) => file.file?.type === 'file').length;
const imgCount = files.filter((file) => file.file?.type === 'image').length;
const text = item.text?.content || '';
return {
...item,
text: {
content: getMultiplePrompt({ fileCount: filesCount, imgCount, question: text })
}
};
}
return item;
});
}
return userInput;
};
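Both this tool node and the chat node later in the commit now assemble the system prompt from optional sections joined with a fixed separator. A sketch with made-up section values (the separator string is taken from this commit):

```ts
const sections = [
  'You are a helpful assistant.',            // toolModel.defaultSystemChatPrompt (example value)
  'Answer concisely in English.',            // node systemPrompt (example value)
  'Document quote:\n<parsed file contents>'  // Prompt_DocumentQuote filled with documentQuoteText
];
const systemPrompt = sections.filter(Boolean).join('\n\n===---===---===\n\n');
```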


@@ -21,6 +21,7 @@ export type DispatchToolModuleProps = ModuleDispatchProps<{
[NodeInputKeyEnum.aiChatTemperature]: number;
[NodeInputKeyEnum.aiChatMaxToken]: number;
[NodeInputKeyEnum.aiChatVision]?: boolean;
[NodeInputKeyEnum.fileUrlList]?: string[];
}> & {
messages: ChatCompletionMessageParam[];
toolNodes: ToolNodeItemType[];


@@ -46,6 +46,8 @@ import { WorkflowResponseType } from '../type';
import { formatTime2YMDHM } from '@fastgpt/global/common/string/time';
import { AiChatQuoteRoleType } from '@fastgpt/global/core/workflow/template/system/aiChat/type';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { getFileContentFromLinks, getHistoryFileLinks } from '../tools/readFiles';
import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
export type ChatProps = ModuleDispatchProps<
AIChatNodeProps & {
@@ -69,7 +71,9 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
histories,
node: { name },
query,
runningAppInfo: { teamId },
workflowStreamResponse,
chatConfig,
params: {
model,
temperature = 0,
@@ -83,10 +87,11 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
quoteTemplate,
quotePrompt,
aiChatVision,
stringQuoteText
fileUrlList: fileLinks, // node quote file links
stringQuoteText // deprecated (legacy file quote)
}
} = props;
const { files: inputFiles } = chatValue2RuntimePrompt(query);
const { files: inputFiles } = chatValue2RuntimePrompt(query); // Chat box input files
if (!userChatInput && inputFiles.length === 0) {
return Promise.reject('Question is empty');
@@ -100,11 +105,22 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
return Promise.reject('The chat model is undefined, you need to select a chat model.');
}
const { datasetQuoteText } = await filterDatasetQuote({
quoteQA,
model: modelConstantsData,
quoteTemplate
});
const [{ datasetQuoteText }, { documentQuoteText, userFiles }] = await Promise.all([
filterDatasetQuote({
quoteQA,
model: modelConstantsData,
quoteTemplate
}),
getMultiInput({
histories,
inputFiles,
fileLinks,
stringQuoteText,
requestOrigin,
maxFiles: chatConfig?.fileSelectConfig?.maxFiles || 20,
teamId
})
]);
const [{ filterMessages }] = await Promise.all([
getChatMessages({
@@ -115,9 +131,9 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
aiChatQuoteRole,
datasetQuotePrompt: quotePrompt,
userChatInput,
inputFiles,
systemPrompt,
stringQuoteText
userFiles,
documentQuoteText
}),
(() => {
// censor model and system key
@@ -132,22 +148,9 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
})()
]);
// Get the request messages
const concatMessages = [
...(modelConstantsData.defaultSystemChatPrompt
? [
{
role: ChatCompletionRequestMessageRoleEnum.System,
content: modelConstantsData.defaultSystemChatPrompt
}
]
: []),
...filterMessages
] as ChatCompletionMessageParam[];
const [requestMessages, max_tokens] = await Promise.all([
loadRequestMessages({
messages: concatMessages,
messages: filterMessages,
useVision: modelConstantsData.vision && aiChatVision,
origin: requestOrigin
}),
@@ -242,7 +245,11 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
tokens,
query: `${userChatInput}`,
maxToken: max_tokens,
historyPreview: getHistoryPreview(chatCompleteMessages, 10000),
historyPreview: getHistoryPreview(
chatCompleteMessages,
10000,
modelConstantsData.vision && aiChatVision
),
contextTotalLen: completeMessages.length
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
@@ -302,7 +309,70 @@ async function filterDatasetQuote({
datasetQuoteText
};
}
async function getMultiInput({
histories,
inputFiles,
fileLinks,
stringQuoteText,
requestOrigin,
maxFiles,
teamId
}: {
histories: ChatItemType[];
inputFiles: UserChatItemValueItemType['file'][];
fileLinks?: string[];
stringQuoteText?: string; // file quote
requestOrigin?: string;
maxFiles: number;
teamId: string;
}) {
// Legacy version compatibility ====>
if (stringQuoteText) {
return {
documentQuoteText: stringQuoteText,
userFiles: inputFiles
};
}
// No quoted file links, but image recognition may still be in use
if (!fileLinks) {
return {
documentQuoteText: '',
userFiles: inputFiles
};
}
// Legacy version compatibility <====
// If the fileLinks param is provided, this is the new version, so the global chat-box files are not used.
// Get files from histories
const filesFromHistories = getHistoryFileLinks(histories);
const urls = [...fileLinks, ...filesFromHistories];
if (urls.length === 0) {
return {
documentQuoteText: '',
userFiles: []
};
}
const { text } = await getFileContentFromLinks({
// Concat fileUrlList and filesFromHistories; remove not supported files
urls,
requestOrigin,
maxFiles,
teamId
});
return {
documentQuoteText: text,
userFiles: fileLinks.map((url) => parseUrlToFileType(url))
};
}
async function getChatMessages({
model,
aiChatQuoteRole,
datasetQuotePrompt = '',
datasetQuoteText,
@@ -310,10 +380,10 @@ async function getChatMessages({
histories = [],
systemPrompt,
userChatInput,
inputFiles,
model,
stringQuoteText
userFiles,
documentQuoteText
}: {
model: LLMModelItemType;
// dataset quote
aiChatQuoteRole: AiChatQuoteRoleType; // user: replace user prompt; system: replace system prompt
datasetQuotePrompt?: string;
@@ -323,10 +393,11 @@ async function getChatMessages({
histories: ChatItemType[];
systemPrompt: string;
userChatInput: string;
inputFiles: UserChatItemValueItemType['file'][];
model: LLMModelItemType;
stringQuoteText?: string; // file quote
userFiles: UserChatItemValueItemType['file'][];
documentQuoteText?: string; // document quote
}) {
// Dataset prompt ====>
// User role or prompt include question
const quoteRole =
aiChatQuoteRole === 'user' || datasetQuotePrompt.includes('{{question}}') ? 'user' : 'system';
@@ -337,6 +408,7 @@ async function getChatMessages({
? Prompt_userQuotePromptList[0].value
: Prompt_systemQuotePromptList[0].value;
// Reset user input, add dataset quote to user input
const replaceInputValue =
useDatasetQuote && quoteRole === 'user'
? replaceVariable(datasetQuotePromptTemplate, {
@@ -344,31 +416,33 @@ async function getChatMessages({
question: userChatInput
})
: userChatInput;
// Dataset prompt <====
const replaceSystemPrompt =
// Concat system prompt
const concatenateSystemPrompt = [
model.defaultSystemChatPrompt,
systemPrompt,
useDatasetQuote && quoteRole === 'system'
? `${systemPrompt ? systemPrompt + '\n\n------\n\n' : ''}${replaceVariable(
datasetQuotePromptTemplate,
{
quote: datasetQuoteText
}
)}`
: systemPrompt;
? replaceVariable(datasetQuotePromptTemplate, {
quote: datasetQuoteText
})
: '',
documentQuoteText
? replaceVariable(Prompt_DocumentQuote, {
quote: documentQuoteText
})
: ''
]
.filter(Boolean)
.join('\n\n===---===---===\n\n');
const messages: ChatItemType[] = [
...getSystemPrompt_ChatItemType(replaceSystemPrompt),
...(stringQuoteText // file quote
? getSystemPrompt_ChatItemType(
replaceVariable(Prompt_DocumentQuote, {
quote: stringQuoteText
})
)
: []),
...getSystemPrompt_ChatItemType(concatenateSystemPrompt),
...histories,
{
obj: ChatRoleEnum.Human,
value: runtimePrompt2ChatsValue({
files: inputFiles,
files: userFiles,
text: replaceInputValue
})
}


@@ -17,12 +17,14 @@ import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/ty
import { authAppByTmbId } from '../../../../support/permission/app/auth';
import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant';
import { getAppVersionById } from '../../../app/version/controller';
import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.history]?: ChatItemType[] | number;
[NodeInputKeyEnum.fileUrlList]?: string[];
[NodeInputKeyEnum.forbidStream]?: boolean;
[NodeInputKeyEnum.fileUrlList]?: string[];
}>;
type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.answerText]: string;
@@ -40,8 +42,24 @@ export const dispatchRunAppNode = async (props: Props): Promise<Response> => {
variables
} = props;
const { system_forbid_stream = false, userChatInput, history, ...childrenAppVariables } = params;
if (!userChatInput) {
const {
system_forbid_stream = false,
userChatInput,
history,
fileUrlList,
...childrenAppVariables
} = params;
const { files } = chatValue2RuntimePrompt(query);
const userInputFiles = (() => {
if (fileUrlList) {
return fileUrlList.map((url) => parseUrlToFileType(url));
}
// Adapt version 4.8.13 upgrade
return files;
})();
if (!userChatInput && userInputFiles.length === 0) {
return Promise.reject('Input is empty');
}
if (!appId) {
@@ -72,7 +90,6 @@ export const dispatchRunAppNode = async (props: Props): Promise<Response> => {
}
const chatHistories = getHistories(history, histories);
const { files } = chatValue2RuntimePrompt(query);
// Rewrite children app variables
const systemVariables = filterSystemVariables(variables);
@@ -102,7 +119,7 @@ export const dispatchRunAppNode = async (props: Props): Promise<Response> => {
histories: chatHistories,
variables: childrenRunVariables,
query: runtimePrompt2ChatsValue({
files,
files: userInputFiles,
text: userChatInput
}),
chatConfig


@@ -1,4 +1,5 @@
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import { ChatFileTypeEnum } from '@fastgpt/global/core/chat/constants';
import { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
@@ -11,6 +12,24 @@ export const dispatchPluginInput = (props: PluginInputProps) => {
const { params, query } = props;
const { files } = chatValue2RuntimePrompt(query);
/*
Process file-type values in params:
* When the plugin is run on its own, this will be a special array of file objects.
* When the plugin is called from a workflow, the parameter is a string[] and is left untouched.
* Hard requirement: when calling the plugin directly via the API, avoid this special-type conflict.
*/
for (const key in params) {
const val = params[key];
if (
Array.isArray(val) &&
val.every(
(item) => item.type === ChatFileTypeEnum.file || item.type === ChatFileTypeEnum.image
)
) {
params[key] = val.map((item) => item.url);
}
}
return {
...params,
[DispatchNodeResponseKeyEnum.nodeResponse]: {},
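A small self-contained reproduction of the conversion performed by that loop, with hypothetical params (real values come from the plugin-run form):

```ts
const params: Record<string, any> = {
  question: 'Summarise these documents',
  files: [
    { type: 'file', name: 'a.pdf', url: 'https://example.com/a.pdf' },
    { type: 'image', name: 'b.png', url: 'https://example.com/b.png' }
  ]
};

for (const key in params) {
  const val = params[key];
  // ChatFileTypeEnum.file / .image have the string values 'file' / 'image'.
  if (Array.isArray(val) && val.every((item) => item.type === 'file' || item.type === 'image')) {
    params[key] = val.map((item) => item.url); // downstream nodes receive plain URL strings
  }
}
// params.files is now ['https://example.com/a.pdf', 'https://example.com/b.png']
```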


@@ -2,16 +2,15 @@ import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runti
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { documentFileType } from '@fastgpt/global/common/file/constants';
import axios from 'axios';
import { serverRequestBaseUrl } from '../../../../common/api/serverRequest';
import { MongoRawTextBuffer } from '../../../../common/buffer/rawText/schema';
import { readFromSecondary } from '../../../../common/mongo/utils';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { detectFileEncoding } from '@fastgpt/global/common/file/tools';
import { detectFileEncoding, parseUrlToFileType } from '@fastgpt/global/common/file/tools';
import { readRawContentByFileBuffer } from '../../../../common/file/read/utils';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { UserChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { parseFileExtensionFromUrl } from '@fastgpt/global/common/string/tools';
type Props = ModuleDispatchProps<{
@@ -48,12 +47,41 @@ export const dispatchReadFiles = async (props: Props): Promise<Response> => {
runningAppInfo: { teamId },
histories,
chatConfig,
node: { version },
params: { fileUrlList = [] }
} = props;
const maxFiles = chatConfig?.fileSelectConfig?.maxFiles || 20;
// Get files from histories
const filesFromHistories = histories
const filesFromHistories = version !== '489' ? [] : getHistoryFileLinks(histories);
const { text, readFilesResult } = await getFileContentFromLinks({
// Concat fileUrlList and filesFromHistories; remove not supported files
urls: [...fileUrlList, ...filesFromHistories],
requestOrigin,
maxFiles,
teamId
});
return {
[NodeOutputKeyEnum.text]: text,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
readFiles: readFilesResult.map((item) => ({
name: item?.filename || '',
url: item?.url || ''
})),
readFilesResult: readFilesResult
.map((item) => item?.nodeResponsePreviewText ?? '')
.join('\n******\n')
},
[DispatchNodeResponseKeyEnum.toolResponses]: {
fileContent: text
}
};
};
export const getHistoryFileLinks = (histories: ChatItemType[]) => {
return histories
.filter((item) => {
if (item.obj === ChatRoleEnum.Human) {
return item.value.filter((value) => value.type === 'file');
@@ -70,26 +98,27 @@ export const dispatchReadFiles = async (props: Props): Promise<Response> => {
return files;
})
.flat();
};
// Concat fileUrlList and filesFromHistories; remove not supported files
const parseUrlList = [...fileUrlList, ...filesFromHistories]
export const getFileContentFromLinks = async ({
urls,
requestOrigin,
maxFiles,
teamId
}: {
urls: string[];
requestOrigin?: string;
maxFiles: number;
teamId: string;
}) => {
const parseUrlList = urls
.map((url) => {
try {
// Avoid "/api/xxx" file error.
const origin = requestOrigin ?? 'http://localhost:3000';
// Check is system upload file
if (url.startsWith('/') || (requestOrigin && url.startsWith(requestOrigin))) {
// Parse url, get filename query. Keep only documents that can be parsed
const parseUrl = new URL(url, origin);
const filenameQuery = parseUrl.searchParams.get('filename');
// Not document
if (filenameQuery) {
const extensionQuery = filenameQuery.split('.').pop()?.toLowerCase() || '';
if (!documentFileType.includes(extensionQuery)) {
return '';
}
if (parseUrlToFileType(url)?.type !== 'file') {
return '';
}
// Remove the origin(Make intranet requests directly)
@@ -197,18 +226,7 @@ export const dispatchReadFiles = async (props: Props): Promise<Response> => {
const text = readFilesResult.map((item) => item?.text ?? '').join('\n******\n');
return {
[NodeOutputKeyEnum.text]: text,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
readFiles: readFilesResult.map((item) => ({
name: item?.filename || '',
url: item?.url || ''
})),
readFilesResult: readFilesResult
.map((item) => item?.nodeResponsePreviewText ?? '')
.join('\n******\n')
},
[DispatchNodeResponseKeyEnum.toolResponses]: {
fileContent: text
}
text,
readFilesResult
};
};
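A usage sketch of the extracted getFileContentFromLinks helper (the ids and URL are hypothetical, and the call runs inside an async dispatcher):

```ts
// Inside an async dispatcher:
const { text, readFilesResult } = await getFileContentFromLinks({
  urls: ['https://example.com/api/common/file/read?filename=spec.docx'],
  requestOrigin: 'https://example.com',
  maxFiles: 20,
  teamId: 'team_123'
});
// `text` joins every parsed file with '\n******\n';
// `readFilesResult` keeps per-file filename, url and preview text for the node response.
```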


@@ -1,14 +1,5 @@
import { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import { countPromptTokens } from '../../common/string/tiktoken/index';
import { getNanoid } from '@fastgpt/global/common/string/tools';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import {
getPluginInputsFromStoreNodes,
getPluginRunContent
} from '@fastgpt/global/core/app/plugin/utils';
import { StoreNodeItemType } from '@fastgpt/global/core/workflow/type/node';
import { RuntimeUserPromptType, UserChatItemType } from '@fastgpt/global/core/chat/type';
import { runtimePrompt2ChatsValue } from '@fastgpt/global/core/chat/adapt';
/* filter search result */
export const filterSearchResultsByMaxChars = async (


@@ -40,6 +40,7 @@
"export_config_successful": "Configuration copied, some sensitive information automatically filtered. Please check for any remaining sensitive data.",
"export_configs": "Export Configurations",
"feedback_count": "User Feedback",
"file_quote_link": "Files",
"file_recover": "File will overwrite current content",
"file_upload": "File Upload",
"file_upload_tip": "Once enabled, documents/images can be uploaded. Documents are retained for 7 days, images for 15 days. Using this feature may incur additional costs. To ensure a good experience, please choose an AI model with a larger context length when using this feature.",
@@ -47,7 +48,7 @@
"go_to_chat": "Go to Conversation",
"go_to_run": "Go to Execution",
"image_upload": "Image Upload",
"image_upload_tip": "Please ensure to select a vision model that can process images.",
"image_upload_tip": "How to activate model image recognition capabilities",
"import_configs": "Import Configurations",
"import_configs_failed": "Import configuration failed, please ensure the configuration is correct!",
"import_configs_success": "Import Successful",
@@ -61,7 +62,7 @@
"intro": "A comprehensive model application orchestration system that offers out-of-the-box data processing and model invocation capabilities. It allows for rapid Dataset construction and workflow orchestration through Flow visualization, enabling complex Dataset scenarios!",
"llm_not_support_vision": "This model does not support image recognition",
"llm_use_vision": "Enable Image Recognition",
"llm_use_vision_tip": "Once image recognition is enabled, this model will automatically receive images uploaded from the 'dialog box' and image links in 'user questions'.",
"llm_use_vision_tip": "After clicking on the model selection, you can see whether the model supports image recognition and the ability to control whether to start image recognition. \nAfter starting image recognition, the model will read the image content in the file link, and if the user question is less than 500 words, it will automatically parse the image in the user question.",
"logs_chat_user": "user",
"logs_empty": "No logs yet~",
"logs_message_total": "Total Messages",
@@ -74,6 +75,7 @@
"month.unit": "Day",
"move_app": "Move Application",
"not_json_file": "Please select a JSON file",
"open_vision_function_tip": "Models with icon switches have image recognition capabilities. \nAfter being turned on, the model will parse the pictures in the file link and automatically parse the pictures in the user's question (user question ≤ 500 words).",
"or_drag_JSON": "or drag in JSON file",
"paste_config": "Paste Configuration",
"permission.des.manage": "Based on write permissions, you can configure publishing channels, view conversation logs, and assign permissions to the application.",
@@ -135,7 +137,7 @@
"version_back": "Revert to Original State",
"version_copy": "Duplicate",
"version_initial_copy": "Duplicate - Original State",
"vision_model_title": "Enable Image Recognition",
"vision_model_title": "Image recognition ability",
"week.Friday": "Friday",
"week.Monday": "Monday",
"week.Saturday": "Saturday",


@@ -29,6 +29,7 @@
"multiple_AI_conversations": "Multiple AI Conversations",
"new_input_guide_lexicon": "New Lexicon",
"no_workflow_response": "No workflow data",
"not_select_file": "No file selected",
"plugins_output": "Plugin Output",
"question_tip": "From top to bottom, the response order of each module",
"response.node_inputs": "Node Inputs",
@@ -40,4 +41,4 @@
"upload": "Upload",
"view_citations": "View References",
"web_site_sync": "Web Site Sync"
}
}


@@ -122,6 +122,8 @@
"pass_returned_object_as_output_to_next_nodes": "Pass the object returned in the code as output to the next nodes. The variable name needs to correspond to the return key.",
"plugin.Instruction_Tip": "You can configure an instruction to explain the purpose of the plugin. This instruction will be displayed each time the plugin is used. Supports standard Markdown syntax.",
"plugin.Instructions": "Instructions",
"plugin.global_file_input": "File links (deprecated)",
"plugin_file_abandon_tip": "Plugin global file upload has been deprecated, please adjust it as soon as possible. \nRelated functions can be achieved through plug-in input and adding image type input.",
"plugin_input": "Plugin Input",
"plugin_output_tool": "When the plug-in is executed as a tool, whether this field responds as a result of the tool",
"question_classification": "Question Classification",
@@ -190,4 +192,4 @@
"workflow.Switch_success": "Switch Successful",
"workflow.Team cloud": "Team Cloud",
"workflow.exit_tips": "Your changes have not been saved. 'Exit directly' will not save your edits."
}
}


@@ -40,6 +40,7 @@
"export_config_successful": "已复制配置,自动过滤部分敏感信息,请注意检查是否仍有敏感数据",
"export_configs": "导出配置",
"feedback_count": "用户反馈",
"file_quote_link": "文件链接",
"file_recover": "文件将覆盖当前内容",
"file_upload": "文件上传",
"file_upload_tip": "开启后,可以上传文档/图片。文档保留7天图片保留15天。使用该功能可能产生较多额外费用。为保证使用体验使用该功能时请选择上下文长度较大的AI模型。",
@@ -47,7 +48,7 @@
"go_to_chat": "去对话",
"go_to_run": "去运行",
"image_upload": "图片上传",
"image_upload_tip": "请确保选择可处理图片的视觉模型",
"image_upload_tip": "如何启动模型图片识别能力",
"import_configs": "导入配置",
"import_configs_failed": "导入配置失败,请确保配置正常!",
"import_configs_success": "导入成功",
@@ -60,8 +61,8 @@
"interval.per_hour": "每小时",
"intro": "是一个大模型应用编排系统,提供开箱即用的数据处理、模型调用等能力,可以快速的构建知识库并通过 Flow 可视化进行工作流编排,实现复杂的知识库场景!",
"llm_not_support_vision": "该模型不支持图片识别",
"llm_use_vision": "启用图片识别",
"llm_use_vision_tip": "启用图片识别后,模型会自动接收来自“对话框上传”的图片,以及“用户问题中的图片链接。",
"llm_use_vision": "图片识别",
"llm_use_vision_tip": "点击模型选择后,可以看到模型是否支持图片识别以及控制是否启动图片识别的能力。启动图片识别后,模型会读取文件链接里图片内容,并且如果用户问题少于 500 字,会自动解析用户问题中的图片。",
"logs_chat_user": "使用者",
"logs_empty": "还没有日志噢~",
"logs_message_total": "消息总数",
@@ -72,9 +73,10 @@
"module.type": "\"{{type}}\"类型\n{{description}}",
"modules.Title is required": "模块名不能为空",
"month.unit": "号",
"move_app": "移动应用",
"move.hint": "移动后,所选应用/文件夹将继承新文件夹的权限设置,原先的权限设置失效。",
"move_app": "移动应用",
"not_json_file": "请选择JSON文件",
"open_vision_function_tip": "有图示开关的模型即拥有图片识别能力。若开启模型会解析文件链接里的图片并自动解析用户问题中的图片用户问题≤500字时生效。",
"or_drag_JSON": "或拖入JSON文件",
"paste_config": "粘贴配置",
"permission.des.manage": "写权限基础上,可配置发布渠道、查看对话日志、分配该应用权限",
@@ -136,7 +138,7 @@
"version_back": "回到初始状态",
"version_copy": "副本",
"version_initial_copy": "副本-初始状态",
"vision_model_title": "启用图片识别",
"vision_model_title": "图片识别能力",
"week.Friday": "星期五",
"week.Monday": "星期一",
"week.Saturday": "星期六",


@@ -29,6 +29,7 @@
"multiple_AI_conversations": "多组 AI 对话",
"new_input_guide_lexicon": "新词库",
"no_workflow_response": "没有运行数据",
"not_select_file": "未选择文件",
"plugins_output": "插件输出",
"question_tip": "从上到下,为各个模块的响应顺序",
"response.child total points": "子工作流积分消耗",
@@ -41,4 +42,4 @@
"upload": "上传",
"view_citations": "查看引用",
"web_site_sync": "Web站点同步"
}
}


@@ -123,6 +123,8 @@
"pass_returned_object_as_output_to_next_nodes": "将代码中 return 的对象作为输出,传递给后续的节点。变量名需要对应 return 的 key",
"plugin.Instruction_Tip": "可以配置一段说明,以解释该插件的用途。每次使用插件前,会显示该段说明。支持标准 Markdown 语法。",
"plugin.Instructions": "使用说明",
"plugin.global_file_input": "文件链接(弃用)",
"plugin_file_abandon_tip": "插件全局文件上传已弃用,请尽快调整。可以通过插件输入,添加图片类型输入来实现相关功能。",
"plugin_input": "插件输入",
"plugin_output_tool": "插件作为工具执行时,该字段是否作为工具响应结果",
"question_classification": "问题分类",
@@ -192,4 +194,4 @@
"workflow.Switch_success": "切换成功",
"workflow.Team cloud": "团队云端",
"workflow.exit_tips": "您的更改尚未保存,「直接退出」将不会保存您的编辑记录。"
}
}


@@ -16,6 +16,9 @@ OPENAI_BASE_URL=https://api.openai.com/v1
# Generic key. It can be an OpenAI key or a OneAPI key.
# Logic here: ONEAPI_URL takes priority; if ONEAPI_URL is filled in, the key must also be a OneAPI key.
CHAT_API_KEY=sk-xxxx
# Whether to convert images to base64 before passing them to the model. Set to true for local development, or when an intranet deployment uses shared models.
MULTIPLE_DATA_TO_BASE64=true
# MongoDB connection parameters. When connecting to a remote database from a local development environment, you may need to add directConnection=true to be able to connect.
MONGODB_URI=mongodb://username:password@0.0.0.0:27017/fastgpt?authSource=admin

Binary image file changed (not shown): 37 KiB before, 48 KiB after; the file diff is suppressed because one or more lines are too long.


@@ -58,8 +58,8 @@ const ChatFunctionTip = ({ type }: { type: `${FnTypeEnum}` }) => {
[FnTypeEnum.visionModel]: {
icon: '/imgs/app/question.svg',
title: t('app:vision_model_title'),
desc: t('app:llm_use_vision_tip'),
imgUrl: '/imgs/app/visionModel.png'
desc: t('app:open_vision_function_tip'),
imgUrl: '/imgs/app/visionModel.svg'
},
[FnTypeEnum.instruction]: {
icon: '/imgs/app/help.svg',


@@ -65,10 +65,6 @@ const VariableEdit = ({
const { setValue, reset, watch, getValues } = form;
const value = getValues();
const type = watch('type');
const valueType = watch('valueType');
const max = watch('max');
const min = watch('min');
const defaultValue = watch('defaultValue');
const inputTypeList = useMemo(
() =>
@@ -376,11 +372,7 @@ const VariableEdit = ({
type={'variable'}
isEdit={!!value.key}
inputType={type}
valueType={valueType}
defaultValue={defaultValue}
defaultValueType={defaultValueType}
max={max}
min={min}
onClose={() => reset({})}
onSubmitSuccess={onSubmitSuccess}
onSubmitError={onSubmitError}


@@ -8,7 +8,7 @@ import MyIcon from '@fastgpt/web/components/common/Icon';
import { useRequest2 } from '@fastgpt/web/hooks/useRequest';
import { ChatBoxInputFormType, ChatBoxInputType, SendPromptFnType } from '../type';
import { textareaMinH } from '../constants';
import { UseFormReturn } from 'react-hook-form';
import { useFieldArray, UseFormReturn } from 'react-hook-form';
import { ChatBoxContext } from '../Provider';
import dynamic from 'next/dynamic';
import { useContextSelector } from 'use-context-selector';
@@ -58,6 +58,10 @@ const ChatInput = ({
fileSelectConfig
} = useContextSelector(ChatBoxContext, (v) => v);
const fileCtrl = useFieldArray({
control,
name: 'files'
});
const {
File,
onOpenSelectFile,
@@ -74,7 +78,7 @@ const ChatInput = ({
outLinkAuthData,
chatId: chatId || '',
fileSelectConfig,
control
fileCtrl
});
const havInput = !!inputValue || fileList.length > 0;
const hasFileUploading = fileList.some((item) => !item.url);
@@ -468,7 +472,7 @@ const ChatInput = ({
{RenderTranslateLoading}
{/* file preview */}
<Box px={[2, 4]}>
<Box px={[1, 3]}>
<FilePreview fileList={fileList} removeFiles={removeFiles} />
</Box>


@@ -64,14 +64,14 @@ export const VariableInputItem = ({
minH={40}
maxH={160}
bg={'myGray.50'}
{...register(item.key, {
{...register(`variables.${item.key}`, {
required: item.required
})}
/>
)}
{item.type === VariableInputEnum.textarea && (
<Textarea
{...register(item.key, {
{...register(`variables.${item.key}`, {
required: item.required
})}
rows={5}
@@ -82,9 +82,9 @@ export const VariableInputItem = ({
{item.type === VariableInputEnum.select && (
<Controller
key={item.key}
key={`variables.${item.key}`}
control={control}
name={item.key}
name={`variables.${item.key}`}
rules={{ required: item.required }}
render={({ field: { ref, value } }) => {
return (
@@ -96,7 +96,7 @@ export const VariableInputItem = ({
value: item.value
}))}
value={value}
onchange={(e) => setValue(item.key, e)}
onchange={(e) => setValue(`variables.${item.key}`, e)}
/>
);
}}
@@ -104,9 +104,9 @@ export const VariableInputItem = ({
)}
{item.type === VariableInputEnum.numberInput && (
<Controller
key={item.key}
key={`variables.${item.key}`}
control={control}
name={item.key}
name={`variables.${item.key}`}
rules={{ required: item.required, min: item.min, max: item.max }}
render={({ field: { ref, value, onChange } }) => (
<NumberInput


@@ -9,21 +9,22 @@ import { getFileIcon } from '@fastgpt/global/common/file/icon';
import { formatFileSize } from '@fastgpt/global/common/file/tools';
import { clone } from 'lodash';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { Control, useFieldArray } from 'react-hook-form';
import { UseFieldArrayReturn } from 'react-hook-form';
import { ChatBoxInputFormType, UserInputFileItemType } from '../type';
import { AppFileSelectConfigType } from '@fastgpt/global/core/app/type';
import { documentFileType } from '@fastgpt/global/common/file/constants';
import { useSystemStore } from '@/web/common/system/useSystemStore';
import { OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat';
interface UseFileUploadOptions {
outLinkAuthData: any;
type UseFileUploadOptions = {
outLinkAuthData: OutLinkChatAuthProps;
chatId: string;
fileSelectConfig: AppFileSelectConfigType;
control: Control<ChatBoxInputFormType, any>;
}
fileCtrl: UseFieldArrayReturn<ChatBoxInputFormType, 'files', 'id'>;
};
export const useFileUpload = (props: UseFileUploadOptions) => {
const { outLinkAuthData, chatId, fileSelectConfig, control } = props;
const { outLinkAuthData, chatId, fileSelectConfig, fileCtrl } = props;
const { toast } = useToast();
const { t } = useTranslation();
const { feConfigs } = useSystemStore();
@@ -33,15 +34,13 @@ export const useFileUpload = (props: UseFileUploadOptions) => {
remove: removeFiles,
fields: fileList,
replace: replaceFiles
} = useFieldArray({
control: control,
name: 'files'
});
} = fileCtrl;
const showSelectFile = fileSelectConfig?.canSelectFile;
const showSelectImg = fileSelectConfig?.canSelectImg;
const maxSelectFiles = fileSelectConfig?.maxFiles ?? 10;
const maxSize = (feConfigs?.uploadFileMaxSize || 1024) * 1024 * 1024; // nkb
const canSelectFileAmount = maxSelectFiles - fileList.length;
const { icon: selectFileIcon, label: selectFileLabel } = useMemo(() => {
if (showSelectFile && showSelectImg) {
@@ -66,7 +65,7 @@ export const useFileUpload = (props: UseFileUploadOptions) => {
const { File, onOpen: onOpenSelectFile } = useSelectFile({
fileType: `${showSelectImg ? 'image/*,' : ''} ${showSelectFile ? documentFileType : ''}`,
multiple: true,
maxCount: maxSelectFiles
maxCount: canSelectFileAmount
});
const onSelectFile = useCallback(


@@ -393,7 +393,7 @@ const ChatBox = (
isInteractivePrompt = false
}) => {
variablesForm.handleSubmit(
async (variables) => {
async ({ variables }) => {
if (!onStartChat) return;
if (isChatting) {
toast({


@@ -20,9 +20,9 @@ export type UserInputFileItemType = {
export type ChatBoxInputFormType = {
input: string;
files: UserInputFileItemType[];
files: UserInputFileItemType[]; // global files
chatStarted: boolean;
[key: string]: any;
variables: Record<string, any>;
};
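An illustrative value of the reshaped ChatBoxInputFormType (field values are made up):

```ts
const formValues: ChatBoxInputFormType = {
  input: 'Summarise the attached report',
  files: [],                        // global chat-box files
  chatStarted: false,
  variables: { language: 'en' }     // app variables are now nested under `variables.*`
};
```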
export type ChatBoxInputType = {


@@ -1,5 +1,5 @@
import React, { useCallback, useEffect, useMemo } from 'react';
import { Controller } from 'react-hook-form';
import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { Controller, useFieldArray } from 'react-hook-form';
import RenderPluginInput from './renderPluginInput';
import { Box, Button, Flex } from '@chakra-ui/react';
import { useTranslation } from 'next-i18next';
@@ -14,7 +14,8 @@ import { useFileUpload } from '../../ChatBox/hooks/useFileUpload';
import FilePreview from '../../components/FilePreview';
import { UserChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import QuestionTip from '@fastgpt/web/components/common/MyTooltip/QuestionTip';
import { ChatBoxInputFormType, UserInputFileItemType } from '../../ChatBox/type';
import { ChatBoxInputFormType } from '../../ChatBox/type';
import { FlowNodeInputItemType } from '@fastgpt/global/core/workflow/type/io';
const RenderInput = () => {
const { t } = useTranslation();
@@ -29,9 +30,7 @@ const RenderInput = () => {
isChatting,
chatConfig,
chatId,
outLinkAuthData,
restartInputStore,
setRestartInputStore
outLinkAuthData
} = useContextSelector(PluginRunContext, (v) => v);
const {
@@ -42,6 +41,11 @@ const RenderInput = () => {
formState: { errors }
} = variablesForm;
/* ===> Global files (deprecated) */
const fileCtrl = useFieldArray({
control: variablesForm.control,
name: 'files'
});
const {
File,
onOpenSelectFile,
@@ -57,41 +61,72 @@ const RenderInput = () => {
outLinkAuthData,
chatId: chatId || '',
fileSelectConfig: chatConfig?.fileSelectConfig,
control
fileCtrl
});
const isDisabledInput = histories.length > 0;
const hasFileUploading = useMemo(() => {
return fileList.some((item) => !item.url);
}, [fileList]);
useRequest2(uploadFiles, {
manual: false,
errorToast: t('common:upload_file_error'),
refreshDeps: [fileList, outLinkAuthData, chatId]
});
/* Global files (deprecated) <=== */
const [restartData, setRestartData] = useState<ChatBoxInputFormType>();
const onClickNewChat = useCallback(
(e: ChatBoxInputFormType, files: UserInputFileItemType[] = []) => {
setRestartInputStore({
...e,
files
});
(e: ChatBoxInputFormType) => {
setRestartData(e);
onNewChat?.();
},
[onNewChat, setRestartInputStore]
[onNewChat]
);
const formatPluginInputs = useMemo(() => {
if (histories.length === 0) return pluginInputs;
try {
const historyValue = histories[0]?.value as UserChatItemValueItemType[];
const inputValueString = historyValue.find((item) => item.type === 'text')?.text?.content;
if (!inputValueString) return pluginInputs;
return JSON.parse(inputValueString) as FlowNodeInputItemType[];
} catch (error) {
console.error('Failed to parse input value:', error);
return pluginInputs;
}
}, [histories, pluginInputs]);
// Reset input value
useEffect(() => {
// Set last run value
if (!isDisabledInput && restartInputStore) {
reset(restartInputStore);
// Set config default value
if (histories.length === 0) {
// Restart
if (restartData) {
reset(restartData);
setRestartData(undefined);
return;
}
const defaultFormValues = formatPluginInputs.reduce(
(acc, input) => {
acc[input.key] = input.defaultValue;
return acc;
},
{} as Record<string, any>
);
reset({
files: [],
variables: defaultFormValues
});
return;
}
// Set history to default value
const historyVariables = (() => {
const historyValue = histories[0]?.value as UserChatItemValueItemType[];
if (!historyValue) return undefined;
const defaultFormValues = pluginInputs.reduce(
(acc, input) => {
acc[input.key] = input.defaultValue;
return acc;
},
{} as Record<string, any>
);
const historyFormValues = (() => {
if (!isDisabledInput) return undefined;
const historyValue = histories[0].value;
try {
const inputValueString = historyValue.find((item) => item.type === 'text')?.text?.content;
return (
@@ -115,32 +150,24 @@ const RenderInput = () => {
return undefined;
}
})();
// Parse history file
const historyFileList = (() => {
if (!isDisabledInput) return [];
const historyValue = histories[0].value as UserChatItemValueItemType[];
return historyValue.filter((item) => item.type === 'file').map((item) => item.file);
const historyValue = histories[0]?.value as UserChatItemValueItemType[];
return historyValue?.filter((item) => item.type === 'file').map((item) => item.file);
})();
reset({
...(historyFormValues || defaultFormValues),
variables: historyVariables,
files: historyFileList
});
}, [getValues, histories, isDisabledInput, pluginInputs, replaceFiles, reset, restartInputStore]);
}, [histories.length]);
const hasFileUploading = useMemo(() => {
return fileList.some((item) => !item.url);
}, [fileList]);
const [uploading, setUploading] = useState(false);
useRequest2(uploadFiles, {
manual: false,
errorToast: t('common:upload_file_error'),
refreshDeps: [fileList, outLinkAuthData, chatId]
});
const fileUploading = uploading || hasFileUploading;
return (
<>
<Box>
{/* instruction */}
{chatConfig?.instruction && (
<Box
@@ -155,7 +182,7 @@ const RenderInput = () => {
<Markdown source={chatConfig.instruction} />
</Box>
)}
{/* file select */}
{/* file select (deprecated) */}
{(showSelectFile || showSelectImg) && (
<Box mb={5}>
<Flex alignItems={'center'}>
@@ -184,12 +211,12 @@ const RenderInput = () => {
</Box>
)}
{/* Filed */}
{pluginInputs.map((input) => {
{formatPluginInputs.map((input) => {
return (
<Controller
key={input.key}
key={`variables.${input.key}`}
control={control}
name={input.key}
name={`variables.${input.key}`}
rules={{
validate: (value) => {
if (!input.required) return true;
@@ -207,6 +234,7 @@ const RenderInput = () => {
isDisabled={isDisabledInput}
isInvalid={errors && Object.keys(errors).includes(input.key)}
input={input}
setUploading={setUploading}
/>
);
}}
@@ -217,13 +245,14 @@ const RenderInput = () => {
{onStartChat && onNewChat && (
<Flex justifyContent={'end'} mt={8}>
<Button
isLoading={isChatting || hasFileUploading}
isLoading={isChatting}
isDisabled={fileUploading}
onClick={() => {
handleSubmit((e) => {
if (isDisabledInput) {
onClickNewChat(e, fileList);
onClickNewChat(e);
} else {
onSubmit(e, fileList);
onSubmit(e);
}
})();
}}
@@ -232,7 +261,7 @@ const RenderInput = () => {
</Button>
</Flex>
)}
</>
</Box>
);
};


@@ -1,5 +1,6 @@
import {
Box,
Button,
Flex,
NumberDecrementStepper,
NumberIncrementStepper,
@@ -12,25 +13,130 @@ import {
import { WorkflowIOValueTypeEnum } from '@fastgpt/global/core/workflow/constants';
import { FlowNodeInputTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { FlowNodeInputItemType } from '@fastgpt/global/core/workflow/type/io';
import FormLabel from '@fastgpt/web/components/common/MyBox/FormLabel';
import MySelect from '@fastgpt/web/components/common/MySelect';
import QuestionTip from '@fastgpt/web/components/common/MyTooltip/QuestionTip';
import { useTranslation } from 'next-i18next';
import dynamic from 'next/dynamic';
import { useFileUpload } from '../../ChatBox/hooks/useFileUpload';
import { useContextSelector } from 'use-context-selector';
import { PluginRunContext } from '../context';
import MyIcon from '@fastgpt/web/components/common/Icon';
import FilePreview from '../../components/FilePreview';
import { useRequest2 } from '@fastgpt/web/hooks/useRequest';
import { useEffect, useMemo } from 'react';
import EmptyTip from '@fastgpt/web/components/common/EmptyTip';
import { useFieldArray } from 'react-hook-form';
const JsonEditor = dynamic(() => import('@fastgpt/web/components/common/Textarea/JsonEditor'));
const FileSelector = ({
input,
setUploading,
onChange
}: {
input: FlowNodeInputItemType;
setUploading: React.Dispatch<React.SetStateAction<boolean>>;
onChange: (...event: any[]) => void;
}) => {
const { t } = useTranslation();
const { variablesForm, histories, chatId, outLinkAuthData } = useContextSelector(
PluginRunContext,
(v) => v
);
const fileCtrl = useFieldArray({
control: variablesForm.control,
name: `variables.${input.key}`
});
const {
File,
fileList,
selectFileIcon,
uploadFiles,
onOpenSelectFile,
onSelectFile,
removeFiles
} = useFileUpload({
outLinkAuthData,
chatId: chatId || '',
fileSelectConfig: {
canSelectFile: input.canSelectFile ?? true,
canSelectImg: input.canSelectImg ?? false,
maxFiles: input.maxFiles ?? 5
},
// @ts-ignore
fileCtrl
});
const isDisabledInput = histories.length > 0;
useRequest2(uploadFiles, {
manual: false,
errorToast: t('common:upload_file_error'),
refreshDeps: [fileList, outLinkAuthData, chatId]
});
const hasFileUploading = useMemo(() => {
return fileList.some((item) => !item.url);
}, [fileList]);
useEffect(() => {
setUploading(hasFileUploading);
onChange(
fileList.map((item) => ({
type: item.type,
name: item.name,
url: item.url,
icon: item.icon
}))
);
}, [fileList, hasFileUploading, onChange, setUploading]);
return (
<>
<Flex alignItems={'center'}>
<Box position={'relative'}>
{input.required && (
<Box position={'absolute'} left={-2} top={'-1px'} color={'red.600'}>
*
</Box>
)}
<FormLabel fontWeight={'500'}>{t(input.label as any)}</FormLabel>
</Box>
{input.description && <QuestionTip ml={2} label={t(input.description as any)} />}
<Box flex={1} />
{/* There is already history, meaning the plugin has run; no more files can be added */}
<Button
isDisabled={histories.length !== 0}
leftIcon={<MyIcon name={selectFileIcon as any} w={'16px'} />}
variant={'whiteBase'}
onClick={() => {
onOpenSelectFile();
}}
>
{t('chat:select')}
</Button>
</Flex>
<FilePreview fileList={fileList} removeFiles={isDisabledInput ? undefined : removeFiles} />
{fileList.length === 0 && <EmptyTip py={0} mt={3} text={t('chat:not_select_file')} />}
<File onSelect={(files) => onSelectFile({ files, fileList })} />
</>
);
};
const RenderPluginInput = ({
value,
onChange,
isDisabled,
isInvalid,
input
input,
setUploading
}: {
value: any;
onChange: () => void;
onChange: (...event: any[]) => void;
isDisabled?: boolean;
isInvalid: boolean;
input: FlowNodeInputItemType;
setUploading: React.Dispatch<React.SetStateAction<boolean>>;
}) => {
const { t } = useTranslation();
const inputType = input.renderTypeList[0];
@@ -44,6 +150,10 @@ const RenderPluginInput = ({
<MySelect list={input.list} value={value} onchange={onChange} isDisabled={isDisabled} />
);
}
if (inputType === FlowNodeInputTypeEnum.fileSelect) {
return <FileSelector onChange={onChange} input={input} setUploading={setUploading} />;
}
if (input.valueType === WorkflowIOValueTypeEnum.string) {
return (
<Textarea
@@ -100,22 +210,26 @@ const RenderPluginInput = ({
);
})();
return !!render ? (
<Box _notLast={{ mb: 4 }} px={1}>
<Flex alignItems={'center'} mb={1}>
<Box position={'relative'}>
{input.required && (
<Box position={'absolute'} left={-2} top={'-1px'} color={'red.600'}>
*
</Box>
)}
{t(input.label as any)}
</Box>
{input.description && <QuestionTip ml={2} label={t(input.description as any)} />}
</Flex>
return (
<Box _notLast={{ mb: 4 }}>
{/* label */}
{inputType !== FlowNodeInputTypeEnum.fileSelect && (
<Flex alignItems={'center'} mb={1}>
<Box position={'relative'}>
{input.required && (
<Box position={'absolute'} left={-2} top={'-1px'} color={'red.600'}>
*
</Box>
)}
<FormLabel fontWeight={'500'}>{t(input.label as any)}</FormLabel>
</Box>
{input.description && <QuestionTip ml={2} label={t(input.description as any)} />}
</Flex>
)}
{render}
</Box>
) : null;
);
};
export default RenderPluginInput;
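
Once every selected file has a URL, the FileSelector above writes an array of plain objects into the plugin form. A rough TypeScript sketch of that value shape and a hypothetical readiness check (names are illustrative, not part of this commit):

// Shape pushed through onChange by FileSelector once uploads finish (sketch).
type PluginRunFileValue = {
  type: string;  // ChatFileTypeEnum value, e.g. 'file' or 'image'
  name: string;
  url?: string;  // empty until the upload completes
  icon?: string;
};

// Hypothetical guard mirroring the hasFileUploading check above.
const allFilesReady = (files: PluginRunFileValue[]) => files.every((f) => Boolean(f.url));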

View File

@@ -1,4 +1,4 @@
import React, { ReactNode, useCallback, useMemo, useRef, useState } from 'react';
import React, { ReactNode, useCallback, useMemo, useRef } from 'react';
import { createContext } from 'use-context-selector';
import { PluginRunBoxProps } from './type';
import {
@@ -8,7 +8,6 @@ import {
} from '@fastgpt/global/core/chat/type';
import { FieldValues, useForm } from 'react-hook-form';
import { PluginRunBoxTabEnum } from './constants';
import { useRequest2 } from '@fastgpt/web/hooks/useRequest';
import { useToast } from '@fastgpt/web/hooks/useToast';
import { getNanoid } from '@fastgpt/global/common/string/tools';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
@@ -16,17 +15,15 @@ import { generatingMessageProps } from '../type';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { useTranslation } from 'next-i18next';
import { OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat';
import { ChatBoxInputFormType, UserInputFileItemType } from '../ChatBox/type';
import { ChatBoxInputFormType } from '../ChatBox/type';
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { getPluginRunUserQuery } from '@fastgpt/global/core/workflow/utils';
type PluginRunContextType = OutLinkChatAuthProps &
PluginRunBoxProps & {
isChatting: boolean;
onSubmit: (e: ChatBoxInputFormType, files?: UserInputFileItemType[]) => Promise<any>;
onSubmit: (e: ChatBoxInputFormType) => Promise<any>;
outLinkAuthData: OutLinkChatAuthProps;
restartInputStore?: ChatBoxInputFormType;
setRestartInputStore: React.Dispatch<React.SetStateAction<ChatBoxInputFormType | undefined>>;
};
export const PluginRunContext = createContext<PluginRunContextType>({
@@ -59,8 +56,6 @@ const PluginRunContextProvider = ({
}: PluginRunBoxProps & { children: ReactNode }) => {
const { pluginInputs, onStartChat, setHistories, histories, setTab } = props;
const [restartInputStore, setRestartInputStore] = useState<ChatBoxInputFormType>();
const { toast } = useToast();
const chatController = useRef(new AbortController());
const { t } = useTranslation();
@@ -80,9 +75,7 @@ const PluginRunContextProvider = ({
);
const variablesForm = useForm<ChatBoxInputFormType>({
defaultValues: {
files: []
}
defaultValues: {}
});
const generatingMessage = useCallback(
@@ -179,8 +172,8 @@ const PluginRunContextProvider = ({
[histories]
);
const { runAsync: onSubmit } = useRequest2(
async (e: ChatBoxInputFormType, files?: UserInputFileItemType[]) => {
const onSubmit = useCallback(
async ({ variables, files }: ChatBoxInputFormType) => {
if (!onStartChat) return;
if (isChatting) {
toast({
@@ -199,7 +192,7 @@ const PluginRunContextProvider = ({
{
...getPluginRunUserQuery({
pluginInputs,
variables: e,
variables,
files: files as RuntimeUserPromptType['files']
}),
status: 'finish'
@@ -234,10 +227,13 @@ const PluginRunContextProvider = ({
try {
const { responseData } = await onStartChat({
messages: messages,
messages,
controller: chatController.current,
generatingMessage,
variables: e
variables: {
files: files,
...variables
}
});
setHistories((state) =>
@@ -262,7 +258,18 @@ const PluginRunContextProvider = ({
})
);
}
}
},
[
abortRequest,
generatingMessage,
isChatting,
onStartChat,
pluginInputs,
setHistories,
setTab,
t,
toast
]
);
const contextValue: PluginRunContextType = {
@@ -270,9 +277,7 @@ const PluginRunContextProvider = ({
isChatting,
onSubmit,
outLinkAuthData,
variablesForm,
restartInputStore,
setRestartInputStore
variablesForm
};
return <PluginRunContext.Provider value={contextValue}>{children}</PluginRunContext.Provider>;
};

View File

@@ -18,13 +18,12 @@ const RenderFilePreview = ({
return fileList.length > 0 ? (
<Flex
maxH={'250px'}
overflowY={'auto'}
overflow={'visible'}
wrap={'wrap'}
pt={3}
userSelect={'none'}
mb={fileList.length > 0 ? 2 : 0}
pr={0.5}
gap={'6px'}
>
{fileList.map((item, index) => {
const isFile = item.type === ChatFileTypeEnum.file;
@@ -33,11 +32,8 @@ const RenderFilePreview = ({
<MyBox
key={index}
maxW={isFile ? 56 : 14}
w={isFile ? '50%' : '12.5%'}
w={isFile ? 'calc(50% - 3px)' : '12.5%'}
aspectRatio={isFile ? 4 : 1}
pr={1.5}
pb={1.5}
mb={0.5}
>
<Box
border={'sm'}

View File

@@ -28,13 +28,24 @@ export const useChat = (params?: { chatId?: string; appId: string; type?: GetCha
// Reset to empty input
const data = variablesForm.getValues();
for (const key in data) {
data[key] = '';
// Reset the old variables to empty
const resetVariables: Record<string, any> = {};
for (const key in data.variables) {
resetVariables[key] = (() => {
if (Array.isArray(data.variables[key])) {
return [];
}
return '';
})();
}
variablesForm.reset({
...data,
...variables
variables: {
...resetVariables,
...variables
}
});
},
[variablesForm]
@@ -42,8 +53,8 @@ export const useChat = (params?: { chatId?: string; appId: string; type?: GetCha
const clearChatRecords = useCallback(() => {
const data = variablesForm.getValues();
for (const key in data) {
variablesForm.setValue(key, '');
for (const key in data.variables) {
variablesForm.setValue(`variables.${key}`, '');
}
ChatBoxRef.current?.restartChat?.();
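
The reset above follows one rule: array-typed variables (such as file lists) go back to an empty array, everything else to an empty string. A minimal standalone sketch of the same rule, using a hypothetical helper name:

// Hypothetical helper expressing the reset rule used by useChat (not part of the commit).
const resetChatVariables = (variables: Record<string, any>): Record<string, any> =>
  Object.fromEntries(
    Object.keys(variables).map((key) => [key, Array.isArray(variables[key]) ? [] : ''])
  );

// resetChatVariables({ fileUrlList: ['https://example.com/a.pdf'], city: 'Shanghai' })
// => { fileUrlList: [], city: '' }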

View File

@@ -387,6 +387,7 @@ const RenderList = React.memo(function RenderList({
isInvalid={errors && Object.keys(errors).includes(input.key)}
onChange={onChange}
input={input}
setUploading={() => {}}
/>
);
}}

View File

@@ -465,7 +465,8 @@ const RenderList = React.memo(function RenderList({
// Add default values to some inputs
const defaultValueMap: Record<string, any> = {
[NodeInputKeyEnum.userChatInput]: undefined
[NodeInputKeyEnum.userChatInput]: undefined,
[NodeInputKeyEnum.fileUrlList]: undefined
};
nodeList.forEach((node) => {
if (node.flowNodeType === FlowNodeTypeEnum.workflowStart) {
@@ -473,6 +474,10 @@ const RenderList = React.memo(function RenderList({
node.nodeId,
NodeOutputKeyEnum.userChatInput
];
defaultValueMap[NodeInputKeyEnum.fileUrlList] = [
node.nodeId,
NodeOutputKeyEnum.userFiles
];
}
});

View File

@@ -46,11 +46,6 @@ const InputFormEditModal = ({
const inputType = watch('type') || FlowNodeInputTypeEnum.input;
const maxLength = watch('maxLength');
const max = watch('max');
const min = watch('min');
const defaultInputValue = watch('defaultValue');
const inputTypeList = [
{
icon: 'core/workflow/inputType/input',
@@ -187,14 +182,9 @@ const InputFormEditModal = ({
type={'formInput'}
isEdit={isEdit}
inputType={inputType}
maxLength={maxLength}
max={max}
min={min}
defaultValue={defaultInputValue}
onClose={onClose}
onSubmitSuccess={onSubmitSuccess}
onSubmitError={onSubmitError}
valueType={defaultValueType}
/>
</Flex>
</MyModal>

View File

@@ -11,7 +11,6 @@ import MyIcon from '@fastgpt/web/components/common/Icon';
import FormLabel from '@fastgpt/web/components/common/MyBox/FormLabel';
import { FlowNodeInputItemType } from '@fastgpt/global/core/workflow/type/io';
import QuestionTip from '@fastgpt/web/components/common/MyTooltip/QuestionTip';
import { useBoolean } from 'ahooks';
import InputTypeConfig from './InputTypeConfig';
export const defaultInput: FlowNodeInputItemType = {
@@ -23,7 +22,10 @@ export const defaultInput: FlowNodeInputItemType = {
label: '',
description: '',
defaultValue: '',
list: [{ label: '', value: '' }]
list: [{ label: '', value: '' }],
maxFiles: 5,
canSelectFile: true,
canSelectImg: true
};
const FieldEditModal = ({
@@ -108,6 +110,13 @@ const FieldEditModal = ({
])
],
[
{
icon: 'core/workflow/inputType/file',
label: t('app:file_upload'),
value: [FlowNodeInputTypeEnum.fileSelect],
defaultValueType: WorkflowIOValueTypeEnum.arrayString,
description: t('app:file_upload_tip')
},
{
icon: 'core/workflow/inputType/customVariable',
label: t('common:core.workflow.inputType.custom'),
@@ -130,19 +139,10 @@ const FieldEditModal = ({
const form = useForm({
defaultValues: defaultValue
});
const { getValues, setValue, watch, reset } = form;
const { setValue, watch, reset } = form;
const renderTypeList = watch('renderTypeList');
const inputType = renderTypeList[0] || FlowNodeInputTypeEnum.reference;
const valueType = watch('valueType');
const [isToolInput, { toggle: setIsToolInput }] = useBoolean(!!getValues('toolDescription'));
const maxLength = watch('maxLength');
const max = watch('max');
const min = watch('min');
const selectValueTypeList = watch('customInputConfig.selectValueTypeList');
const defaultInputValue = watch('defaultValue');
const defaultValueType = useMemo(
() =>
@@ -190,8 +190,8 @@ const FieldEditModal = ({
}
}
// Focus remove toolDescription
if (isToolInput && data.renderTypeList.includes(FlowNodeInputTypeEnum.reference)) {
// Keep toolDescription in sync with description for reference-type tool inputs; clear it otherwise
if (data.toolDescription && data.renderTypeList.includes(FlowNodeInputTypeEnum.reference)) {
data.toolDescription = data.description;
} else {
data.toolDescription = undefined;
@@ -211,18 +211,7 @@ const FieldEditModal = ({
reset(defaultInput);
}
},
[
defaultValue.key,
defaultValueType,
isEdit,
isToolInput,
keys,
onSubmit,
t,
toast,
onClose,
reset
]
[defaultValue.key, defaultValueType, isEdit, keys, onSubmit, t, toast, onClose, reset]
);
const onSubmitError = useCallback(
(e: Object) => {
@@ -241,7 +230,7 @@ const FieldEditModal = ({
return (
<MyModal
isOpen={true}
isOpen
onClose={onClose}
iconSrc="/imgs/workflow/extract.png"
title={isEdit ? t('workflow:edit_input') : t('workflow:add_new_input')}
@@ -321,14 +310,6 @@ const FieldEditModal = ({
isEdit={isEdit}
onClose={onClose}
inputType={inputType}
maxLength={maxLength}
max={max}
min={min}
selectValueTypeList={selectValueTypeList}
defaultValue={defaultInputValue}
isToolInput={isToolInput}
setIsToolInput={setIsToolInput}
valueType={valueType}
defaultValueType={defaultValueType}
onSubmitSuccess={onSubmitSuccess}
onSubmitError={onSubmitError}

View File

@@ -3,7 +3,6 @@ import {
Button,
Flex,
FormControl,
FormLabel,
HStack,
Input,
NumberDecrementStepper,
@@ -23,7 +22,6 @@ import {
FlowNodeInputTypeEnum,
FlowValueTypeMap
} from '@fastgpt/global/core/workflow/node/constant';
import { FlowNodeInputItemType } from '@fastgpt/global/core/workflow/type/io';
import MySelect from '@fastgpt/web/components/common/MySelect';
import MultipleSelect from '@fastgpt/web/components/common/MySelect/MultipleSelect';
import QuestionTip from '@fastgpt/web/components/common/MyTooltip/QuestionTip';
@@ -36,7 +34,10 @@ import DndDrag, { Draggable } from '@fastgpt/web/components/common/DndDrag';
import MyTextarea from '@/components/common/Textarea/MyTextarea';
import MyNumberInput from '@fastgpt/web/components/common/Input/NumberInput';
type ListValueType = { id: string; value: string; label: string }[];
import ChatFunctionTip from '@/components/core/app/Tip';
import MySlider from '@/components/Slider';
import { useSystemStore } from '@/web/common/system/useSystemStore';
import FormLabel from '@fastgpt/web/components/common/MyBox/FormLabel';
const InputTypeConfig = ({
form,
@@ -44,36 +45,18 @@ const InputTypeConfig = ({
onClose,
type,
inputType,
maxLength,
max,
min,
selectValueTypeList,
defaultValue,
isToolInput,
setIsToolInput,
valueType,
defaultValueType,
onSubmitSuccess,
onSubmitError
}: {
// Common fields
form: UseFormReturn<any>;
form: UseFormReturn<any, any>;
isEdit: boolean;
onClose: () => void;
type: 'plugin' | 'formInput' | 'variable';
inputType: FlowNodeInputTypeEnum | VariableInputEnum;
maxLength?: number;
max?: number;
min?: number;
selectValueTypeList?: WorkflowIOValueTypeEnum[];
defaultValue?: string;
// Plugin-specific fields
isToolInput?: boolean;
setIsToolInput?: () => void;
valueType?: WorkflowIOValueTypeEnum;
defaultValueType?: WorkflowIOValueTypeEnum;
// Update methods
@@ -82,9 +65,7 @@ const InputTypeConfig = ({
}) => {
const { t } = useTranslation();
const defaultListValue = { label: t('common:None'), value: '' };
const { register, setValue, handleSubmit, control, watch } = form;
const listValue: ListValueType = watch('list');
const { feConfigs } = useSystemStore();
const typeLabels = {
name: {
@@ -99,6 +80,18 @@ const InputTypeConfig = ({
}
};
const { register, setValue, handleSubmit, control, watch } = form;
const maxLength = watch('maxLength');
const max = watch('max');
const min = watch('min');
const selectValueTypeList = watch('customInputConfig.selectValueTypeList');
const defaultValue = watch('defaultValue');
const valueType = watch('valueType');
const toolDescription = watch('toolDescription');
const isToolInput = !!toolDescription;
const listValue = watch('list') ?? [];
const {
fields: selectEnums,
append: appendEnums,
@@ -166,6 +159,10 @@ const InputTypeConfig = ({
return type === 'plugin' && list.includes(inputType as FlowNodeInputTypeEnum);
}, [inputType, type]);
// File select
const maxFiles = watch('maxFiles');
const maxSelectFiles = Math.min(feConfigs?.uploadFileMaxAmount ?? 20, 50);
return (
<Stack flex={1} borderLeft={'1px solid #F0F1F6'} justifyContent={'space-between'}>
<Flex flexDirection={'column'} p={8} pb={2} gap={4} flex={'1 0 0'} overflow={'auto'}>
@@ -189,7 +186,9 @@ const InputTypeConfig = ({
bg={'myGray.50'}
placeholder={t('workflow:field_description_placeholder')}
rows={3}
{...register('description', { required: isToolInput ? true : false })}
{...register('description', {
required: showIsToolInput && isToolInput ? true : false
})}
/>
</Flex>
@@ -213,7 +212,7 @@ const InputTypeConfig = ({
</Box>
) : (
<Box fontSize={'14px'} mb={2}>
{defaultValueType}
{defaultValueType ? t(FlowValueTypeMap[defaultValueType]?.label as any) : ''}
</Box>
)}
</Flex>
@@ -236,7 +235,7 @@ const InputTypeConfig = ({
<Switch
isChecked={isToolInput}
onChange={(e) => {
setIsToolInput && setIsToolInput();
setValue('toolDescription', e.target.checked ? 'sign' : '');
}}
/>
</Flex>
@@ -341,7 +340,7 @@ const InputTypeConfig = ({
value: item.value
}))}
value={
defaultValue && listValue.map((item) => item.value).includes(defaultValue)
defaultValue && listValue.map((item: any) => item.value).includes(defaultValue)
? defaultValue
: ''
}
@@ -357,12 +356,12 @@ const InputTypeConfig = ({
{inputType === FlowNodeInputTypeEnum.addInputParam && (
<>
<Flex alignItems={'center'}>
{/* <Flex alignItems={'center'}>
<FormLabel flex={'0 0 132px'} fontWeight={'medium'}>
{t('common:core.module.Input Type')}
</FormLabel>
<Box fontSize={'14px'}>{t('workflow:only_the_reference_type_is_supported')}</Box>
</Flex>
</Flex> */}
<Box>
<HStack mb={1}>
<FormLabel fontWeight={'medium'}>{t('workflow:optional_value_type')}</FormLabel>
@@ -389,7 +388,9 @@ const InputTypeConfig = ({
.map((id) => mergedSelectEnums.find((item) => item.id === id))
.filter(Boolean) as { id: string; value: string }[];
removeEnums();
newSelectEnums.forEach((item) => appendEnums(item));
newSelectEnums.forEach((item) =>
appendEnums({ label: item.value, value: item.value })
);
// Prevent the last element from being focused
setTimeout(() => {
@@ -505,6 +506,60 @@ const InputTypeConfig = ({
</Button>
</>
)}
{inputType === FlowNodeInputTypeEnum.fileSelect && (
<>
<Flex alignItems={'center'} minH={'40px'}>
<FormLabel flex={'0 0 132px'} fontWeight={'medium'}>
{t('app:document_upload')}
</FormLabel>
<Switch
{...register('canSelectFile', {
required: true
})}
/>
</Flex>
<Box w={'full'} minH={'40px'}>
<Flex alignItems={'center'}>
<FormLabel flex={'0 0 132px'} fontWeight={'medium'}>
{t('app:image_upload')}
</FormLabel>
<Switch
{...register('canSelectImg', {
required: true
})}
/>
</Flex>
<Flex color={'myGray.500'}>
<Box fontSize={'xs'}>{t('app:image_upload_tip')}</Box>
<ChatFunctionTip type="visionModel" />
</Flex>
</Box>
<Box>
<HStack>
<FormLabel fontWeight={'medium'}>{t('app:upload_file_max_amount')}</FormLabel>
<QuestionTip label={t('app:upload_file_max_amount_tip')} />
</HStack>
<Box mt={5}>
<MySlider
markList={[
{ label: '1', value: 1 },
{ label: `${maxSelectFiles}`, value: maxSelectFiles }
]}
width={'100%'}
min={1}
max={maxSelectFiles}
step={1}
value={maxFiles ?? 5}
onChange={(e) => {
setValue('maxFiles', e);
}}
/>
</Box>
</Box>
</>
)}
</Flex>
<Flex justify={'flex-end'} gap={3} pb={8} pr={8}>
@@ -514,10 +569,7 @@ const InputTypeConfig = ({
<Button
variant={'primaryOutline'}
fontWeight={'medium'}
onClick={handleSubmit(
(data: FlowNodeInputItemType) => onSubmitSuccess(data, 'confirm'),
onSubmitError
)}
onClick={handleSubmit((data) => onSubmitSuccess(data, 'confirm'), onSubmitError)}
w={20}
>
{t('common:common.Confirm')}
@@ -525,10 +577,7 @@ const InputTypeConfig = ({
{!isEdit && (
<Button
fontWeight={'medium'}
onClick={handleSubmit(
(data: FlowNodeInputItemType) => onSubmitSuccess(data, 'continue'),
onSubmitError
)}
onClick={handleSubmit((data) => onSubmitSuccess(data, 'continue'), onSubmitError)}
w={20}
>
{t('common:common.Continue_Adding')}
@@ -539,4 +588,4 @@ const InputTypeConfig = ({
);
};
export default React.memo(InputTypeConfig);
export default InputTypeConfig;
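
When the fileSelect type is chosen, the panel above persists three extra fields alongside the usual input metadata. A rough example of the resulting object (key, label, and values are illustrative only; the enums are the ones already imported in this file):

// Illustrative fileSelect input produced by the editor above (sketch, not from the commit).
const exampleFileSelectInput = {
  key: 'attachments',                                   // hypothetical key
  label: 'attachments',
  renderTypeList: [FlowNodeInputTypeEnum.fileSelect],
  valueType: WorkflowIOValueTypeEnum.arrayString,
  required: false,
  canSelectFile: true,  // document upload switch
  canSelectImg: false,  // image upload switch; images need a vision-capable model downstream
  maxFiles: 5           // clamped in the UI to Math.min(feConfigs?.uploadFileMaxAmount ?? 20, 50)
};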

View File

@@ -114,46 +114,55 @@ function Instruction({ chatConfig: { instruction }, setAppDetail }: ComponentPro
}
function FileSelectConfig({ chatConfig: { fileSelectConfig }, setAppDetail }: ComponentProps) {
const { t } = useTranslation();
const onChangeNode = useContextSelector(WorkflowContext, (v) => v.onChangeNode);
const nodes = useContextSelector(WorkflowContext, (v) => v.nodes);
const pluginInputNode = nodes.find((item) => item.type === FlowNodeTypeEnum.pluginInput)!;
return (
<FileSelect
value={fileSelectConfig}
color={'myGray.600'}
fontWeight={'medium'}
fontSize={'14px'}
onChange={(e) => {
setAppDetail((state) => ({
...state,
chatConfig: {
...state.chatConfig,
fileSelectConfig: e
}
}));
<>
<FileSelect
value={fileSelectConfig}
color={'myGray.600'}
fontWeight={'medium'}
fontSize={'sm'}
onChange={(e) => {
setAppDetail((state) => ({
...state,
chatConfig: {
...state.chatConfig,
fileSelectConfig: e
}
}));
// Dynamically add or delete the userFilesInput output
const canUploadFiles = e.canSelectFile || e.canSelectImg;
const repeatKey = pluginInputNode?.data.outputs.find(
(item) => item.key === userFilesInput.key
);
if (canUploadFiles) {
!repeatKey &&
onChangeNode({
nodeId: pluginInputNode.id,
type: 'addOutput',
value: userFilesInput
});
} else {
repeatKey &&
onChangeNode({
nodeId: pluginInputNode.id,
type: 'delOutput',
key: userFilesInput.key
});
}
}}
/>
// Dynamically add or delete the userFilesInput output
const canUploadFiles = e.canSelectFile || e.canSelectImg;
const repeatKey = pluginInputNode?.data.outputs.find(
(item) => item.key === userFilesInput.key
);
if (canUploadFiles) {
!repeatKey &&
onChangeNode({
nodeId: pluginInputNode.id,
type: 'addOutput',
value: {
...userFilesInput,
label: t('workflow:plugin.global_file_input')
}
});
} else {
repeatKey &&
onChangeNode({
nodeId: pluginInputNode.id,
type: 'delOutput',
key: userFilesInput.key
});
}
}}
/>
<Box fontSize={'mini'} color={'myGray.500'}>
{t('workflow:plugin_file_abandon_tip')}
</Box>
</>
);
}

View File

@@ -142,7 +142,7 @@ const NodePluginInput = ({ data, selected }: NodeProps<FlowNodeItemType>) => {
}}
/>
</Container>
{!!outputs.filter((output) => output.type !== FlowNodeOutputTypeEnum.hidden).length && (
{outputs.length !== inputs.length && (
<Container>
<IOTitle text={t('common:common.Output')} />
<RenderOutput nodeId={nodeId} flowOutputList={outputs} />

View File

@@ -55,9 +55,9 @@ const InputLabel = ({ nodeId, input }: Props) => {
{description && <QuestionTip ml={1} label={t(description as any)}></QuestionTip>}
</Flex>
{/* value type */}
{renderType === FlowNodeInputTypeEnum.reference && (
<ValueTypeLabel valueType={valueType} valueDesc={valueDesc} />
)}
{[FlowNodeInputTypeEnum.reference, FlowNodeInputTypeEnum.fileSelect].includes(
renderType
) && <ValueTypeLabel valueType={valueType} valueDesc={valueDesc} />}
{/* input type select */}
{renderTypeList && renderTypeList.length > 1 && (

View File

@@ -16,6 +16,10 @@ const RenderList: {
types: [FlowNodeInputTypeEnum.reference],
Component: dynamic(() => import('./templates/Reference'))
},
{
types: [FlowNodeInputTypeEnum.fileSelect],
Component: dynamic(() => import('./templates/Reference'))
},
{
types: [FlowNodeInputTypeEnum.select],
Component: dynamic(() => import('./templates/Select'))

View File

@@ -34,7 +34,6 @@ import { useChat } from '@/components/core/chat/ChatContainer/useChat';
import ChatBox from '@/components/core/chat/ChatContainer/ChatBox';
import { useSystem } from '@fastgpt/web/hooks/useSystem';
import { InitChatResponse } from '@/global/core/chat/api';
import { AppErrEnum } from '@fastgpt/global/common/error/code/app';
const CustomPluginRunBox = dynamic(() => import('./components/CustomPluginRunBox'));

View File

@@ -11,7 +11,11 @@ import {
FlowNodeInputTypeEnum,
FlowNodeTypeEnum
} from '@fastgpt/global/core/workflow/node/constant';
import { NodeInputKeyEnum, WorkflowIOValueTypeEnum } from '@fastgpt/global/core/workflow/constants';
import {
NodeInputKeyEnum,
NodeOutputKeyEnum,
WorkflowIOValueTypeEnum
} from '@fastgpt/global/core/workflow/constants';
import { getNanoid } from '@fastgpt/global/common/string/tools';
import { StoreEdgeItemType } from '@fastgpt/global/core/workflow/type/edge';
@@ -30,9 +34,11 @@ import {
AiChatQuoteTemplate
} from '@fastgpt/global/core/workflow/template/system/aiChat/index';
import { DatasetSearchModule } from '@fastgpt/global/core/workflow/template/system/datasetSearch';
import { ReadFilesNode } from '@fastgpt/global/core/workflow/template/system/readFiles';
import { i18nT } from '@fastgpt/web/i18n/utils';
import { Input_Template_UserChatInput } from '@fastgpt/global/core/workflow/template/input';
import {
Input_Template_File_Link_Prompt,
Input_Template_UserChatInput
} from '@fastgpt/global/core/workflow/template/input';
type WorkflowType = {
nodes: StoreNodeItemType[];
@@ -173,6 +179,10 @@ export function form2AppWorkflow(
valueType: WorkflowIOValueTypeEnum.datasetQuote,
value: selectedDatasets?.length > 0 ? [datasetNodeId, 'quoteQA'] : undefined
},
{
...Input_Template_File_Link_Prompt,
value: [workflowStartNodeId, NodeOutputKeyEnum.userFiles]
},
{
key: NodeInputKeyEnum.aiChatVision,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
@@ -321,44 +331,6 @@ export function form2AppWorkflow(
]
}
: null;
// Read file tool config
const readFileTool: WorkflowType | null = data.chatConfig.fileSelectConfig?.canSelectFile
? {
nodes: [
{
nodeId: ReadFilesNode.id,
name: t(ReadFilesNode.name),
intro: t(ReadFilesNode.intro),
avatar: ReadFilesNode.avatar,
flowNodeType: ReadFilesNode.flowNodeType,
showStatus: true,
position: {
x: 974.6209854328943,
y: 587.6378828744465
},
version: ReadFilesNode.version,
inputs: [
{
key: NodeInputKeyEnum.fileUrlList,
renderTypeList: [FlowNodeInputTypeEnum.reference],
valueType: WorkflowIOValueTypeEnum.arrayString,
label: t('app:workflow.file_url'),
value: [workflowStartNodeId, 'userFiles']
}
],
outputs: ReadFilesNode.outputs
}
],
edges: [
{
source: toolNodeId,
target: ReadFilesNode.id,
sourceHandle: 'selectedTools',
targetHandle: 'selectedTools'
}
]
}
: null;
// Computed tools config
const pluginTool: WorkflowType[] = formData.selectedTools.map((tool, i) => {
@@ -477,6 +449,10 @@ export function form2AppWorkflow(
max: 30,
value: formData.aiSettings.maxHistories
},
{
...Input_Template_File_Link_Prompt,
value: [workflowStartNodeId, NodeOutputKeyEnum.userFiles]
},
{
key: 'userChatInput',
renderTypeList: [FlowNodeInputTypeEnum.reference, FlowNodeInputTypeEnum.textarea],
@@ -497,7 +473,6 @@ export function form2AppWorkflow(
},
// tool nodes
...(datasetTool ? datasetTool.nodes : []),
...(readFileTool ? readFileTool.nodes : []),
...pluginTool.map((tool) => tool.nodes).flat()
],
edges: [
@@ -509,7 +484,6 @@ export function form2AppWorkflow(
},
// tool edges
...(datasetTool ? datasetTool.edges : []),
...(readFileTool ? readFileTool.edges : []),
...pluginTool.map((tool) => tool.edges).flat()
]
};
@@ -530,8 +504,7 @@ export function form2AppWorkflow(
}
const workflow = (() => {
if (data.selectedTools.length > 0 || data.chatConfig.fileSelectConfig?.canSelectFile)
return toolTemplates(data);
if (data.selectedTools.length > 0) return toolTemplates(data);
if (selectedDatasets.length > 0) return datasetTemplate(data);
return simpleChatTemplate(data);
})();
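
With the ReadFilesNode tool gone from the simple-mode compiler, user files now reach the AI chat and tool nodes through an ordinary reference input. A reference value is a [sourceNodeId, outputKey] pair; a minimal sketch of the entry injected above, reusing the identifiers from this file:

// Sketch of the injected file-link input; at runtime the reference resolves to the
// user-uploaded file URLs exposed by the workflow start node.
const fileLinkInput = {
  ...Input_Template_File_Link_Prompt,
  value: [workflowStartNodeId, NodeOutputKeyEnum.userFiles] // [sourceNodeId, outputKey]
};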