diff --git a/docSite/content/zh-cn/docs/development/upgrading/4813.md b/docSite/content/zh-cn/docs/development/upgrading/4813.md index 255d70002..99a3997bd 100644 --- a/docSite/content/zh-cn/docs/development/upgrading/4813.md +++ b/docSite/content/zh-cn/docs/development/upgrading/4813.md @@ -7,6 +7,20 @@ toc: true weight: 811 --- +## 更新指南 + +### 1. 做好数据备份 + +### 2. 修改镜像 + +- 更新 FastGPT 镜像 tag: v4.8.13-alpha +- 更新 FastGPT 管理端镜像 tag: v4.8.13-alpha (fastgpt-pro镜像) +- Sandbox 镜像,可以不更新 + +### 3. 调整文件上传编排 + +虽然依然兼容旧版的文件上传编排,但是未来两个版本内将会去除兼容代码,请尽快调整编排,以适应最新的文件上传逻辑。尤其是嵌套应用的文件传递,未来将不会自动传递,必须手动指定传递的文件。 + ## 更新说明 1. 新增 - 数组变量选择支持多选,可以选多个数组或对应的单一数据类型,会自动按选择顺序进行合并。 @@ -24,18 +38,4 @@ weight: 811 13. 优化 - 工作流上下文拆分,性能优化。 14. 优化 - 语音播报,不支持 mediaSource 的浏览器可等待完全生成语音后输出。 15. 修复 - Dockerfile pnpm install 支持代理。 -16. 修复 - BI 图表生成无法写入文件。 - -## 更新指南 - -### 1. 做好数据备份 - -### 2. 修改镜像 - -- 更新 FastGPT 镜像 tag: v4.8.13-alpha -- 更新 FastGPT 管理端镜像 tag: v4.8.13-alpha (fastgpt-pro镜像) -- Sandbox 镜像,可以不更新 - -### 3. 调整文件上传编排 - -虽然依然兼容旧版的文件上传编排,但是未来两个版本内将会去除兼容代码,请尽快调整编排,以适应最新的文件上传逻辑。尤其是嵌套应用的文件传递,未来将不会自动传递,必须手动指定传递的文件。 +16. 修复 - BI 图表生成无法写入文件。 \ No newline at end of file diff --git a/files/docker/docker-compose-milvus.yml b/files/docker/docker-compose-milvus.yml index 4703bdddf..ccd84a7d2 100644 --- a/files/docker/docker-compose-milvus.yml +++ b/files/docker/docker-compose-milvus.yml @@ -139,6 +139,8 @@ services: - OPENAI_BASE_URL=http://oneapi:3000/v1 # AI模型的API Key。(这里默认填写了OneAPI的快速默认key,测试通后,务必及时修改) - CHAT_API_KEY=sk-fastgpt + # 是否将图片转成 base64 传递给模型,本地开发和内网环境使用共有模型时候需要设置为 true + - MULTIPLE_DATA_TO_BASE64=false # 数据库最大连接数 - DB_MAX_LINK=30 # 登录凭证密钥 diff --git a/files/docker/docker-compose-pgvector.yml b/files/docker/docker-compose-pgvector.yml index 558fee914..9767d12f7 100644 --- a/files/docker/docker-compose-pgvector.yml +++ b/files/docker/docker-compose-pgvector.yml @@ -97,6 +97,8 @@ services: - OPENAI_BASE_URL=http://oneapi:3000/v1 # AI模型的API Key。(这里默认填写了OneAPI的快速默认key,测试通后,务必及时修改) - CHAT_API_KEY=sk-fastgpt + # 是否将图片转成 base64 传递给模型,本地开发和内网环境使用共有模型时候需要设置为 true + - MULTIPLE_DATA_TO_BASE64=false # 数据库最大连接数 - DB_MAX_LINK=30 # 登录凭证密钥 diff --git a/files/docker/docker-compose-zilliz.yml b/files/docker/docker-compose-zilliz.yml index 6796dc7a7..b6e781b52 100644 --- a/files/docker/docker-compose-zilliz.yml +++ b/files/docker/docker-compose-zilliz.yml @@ -77,6 +77,8 @@ services: - OPENAI_BASE_URL=http://oneapi:3000/v1 # AI模型的API Key。(这里默认填写了OneAPI的快速默认key,测试通后,务必及时修改) - CHAT_API_KEY=sk-fastgpt + # 是否将图片转成 base64 传递给模型,本地开发和内网环境使用共有模型时候需要设置为 true + - MULTIPLE_DATA_TO_BASE64=false # 数据库最大连接数 - DB_MAX_LINK=30 # 登录凭证密钥 diff --git a/packages/global/common/file/constants.ts b/packages/global/common/file/constants.ts index 23fd27a6d..9cc2201e1 100644 --- a/packages/global/common/file/constants.ts +++ b/packages/global/common/file/constants.ts @@ -19,3 +19,5 @@ export const bucketNameMap = { export const ReadFileBaseUrl = `${process.env.FE_DOMAIN || ''}/api/common/file/read`; export const documentFileType = '.txt, .docx, .csv, .xlsx, .pdf, .md, .html, .pptx'; +export const imageFileType = + '.jpg, .jpeg, .png, .gif, .bmp, .webp, .svg, .tiff, .tif, .ico, .heic, .heif, .avif'; diff --git a/packages/global/common/file/tools.ts b/packages/global/common/file/tools.ts index 97e6bc7cb..cf26762e7 100644 --- a/packages/global/common/file/tools.ts +++ b/packages/global/common/file/tools.ts @@ -1,4 +1,7 @@ import { detect } from 'jschardet'; +import { documentFileType, imageFileType } from './constants'; +import { ChatFileTypeEnum } from 
'../../core/chat/constants'; +import { UserChatItemValueItemType } from '../../core/chat/type'; export const formatFileSize = (bytes: number): string => { if (bytes === 0) return '0 B'; @@ -13,3 +16,39 @@ export const formatFileSize = (bytes: number): string => { export const detectFileEncoding = (buffer: Buffer) => { return detect(buffer.slice(0, 200))?.encoding?.toLocaleLowerCase(); }; + +// Url => user upload file type +export const parseUrlToFileType = (url: string): UserChatItemValueItemType['file'] | undefined => { + const parseUrl = new URL(url, 'https://locaohost:3000'); + + const filename = (() => { + // Old version file url: https://xxx.com/file/read?filename=xxx.pdf + const filenameQuery = parseUrl.searchParams.get('filename'); + if (filenameQuery) return filenameQuery; + + // Common file: https://xxx.com/xxx.pdf?xxxx=xxx + const pathname = parseUrl.pathname; + if (pathname) return pathname.split('/').pop(); + })(); + + if (!filename) return; + + const extension = filename.split('.').pop()?.toLowerCase() || ''; + + if (!extension) return; + + if (documentFileType.includes(extension)) { + return { + type: ChatFileTypeEnum.file, + name: filename, + url + }; + } + if (imageFileType.includes(extension)) { + return { + type: ChatFileTypeEnum.image, + name: filename, + url + }; + } +}; diff --git a/packages/global/core/chat/utils.ts b/packages/global/core/chat/utils.ts index a8d32e366..d3b590633 100644 --- a/packages/global/core/chat/utils.ts +++ b/packages/global/core/chat/utils.ts @@ -30,7 +30,8 @@ export const getChatTitleFromChatMessage = (message?: ChatItemType, defaultValue // Keep the first n and last n characters export const getHistoryPreview = ( completeMessages: ChatItemType[], - size = 100 + size = 100, + useVision = false ): { obj: `${ChatRoleEnum}`; value: string; @@ -48,7 +49,8 @@ export const getHistoryPreview = ( item.value ?.map((item) => { if (item?.text?.content) return item?.text?.content; - if (item.file?.type === 'image') return 'Input an image'; + if (item.file?.type === 'image' && useVision) + return `![Input an image](${item.file.url.slice(0, 100)}...)`; return ''; }) .filter(Boolean) diff --git a/packages/global/core/workflow/node/constant.ts b/packages/global/core/workflow/node/constant.ts index 54dfa55ba..e3a90725f 100644 --- a/packages/global/core/workflow/node/constant.ts +++ b/packages/global/core/workflow/node/constant.ts @@ -27,7 +27,9 @@ export enum FlowNodeInputTypeEnum { // render ui settingDatasetQuotePrompt = 'settingDatasetQuotePrompt', hidden = 'hidden', - custom = 'custom' + custom = 'custom', + + fileSelect = 'fileSelect' } export const FlowNodeInputMap: Record< FlowNodeInputTypeEnum, @@ -85,6 +87,9 @@ export const FlowNodeInputMap: Record< }, [FlowNodeInputTypeEnum.textarea]: { icon: 'core/workflow/inputType/textarea' + }, + [FlowNodeInputTypeEnum.fileSelect]: { + icon: 'core/workflow/inputType/file' } }; @@ -137,43 +142,43 @@ export enum FlowNodeTypeEnum { // node IO value type export const FlowValueTypeMap = { [WorkflowIOValueTypeEnum.string]: { - label: 'string', + label: 'String', value: WorkflowIOValueTypeEnum.string }, [WorkflowIOValueTypeEnum.number]: { - label: 'number', + label: 'Number', value: WorkflowIOValueTypeEnum.number }, [WorkflowIOValueTypeEnum.boolean]: { - label: 'boolean', + label: 'Boolean', value: WorkflowIOValueTypeEnum.boolean }, [WorkflowIOValueTypeEnum.object]: { - label: 'object', + label: 'Object', value: WorkflowIOValueTypeEnum.object }, [WorkflowIOValueTypeEnum.arrayString]: { - label: 'array', + label: 
'Array', value: WorkflowIOValueTypeEnum.arrayString }, [WorkflowIOValueTypeEnum.arrayNumber]: { - label: 'array', + label: 'Array', value: WorkflowIOValueTypeEnum.arrayNumber }, [WorkflowIOValueTypeEnum.arrayBoolean]: { - label: 'array', + label: 'Array', value: WorkflowIOValueTypeEnum.arrayBoolean }, [WorkflowIOValueTypeEnum.arrayObject]: { - label: 'array', + label: 'Array', value: WorkflowIOValueTypeEnum.arrayObject }, [WorkflowIOValueTypeEnum.arrayAny]: { - label: 'array', + label: 'Array', value: WorkflowIOValueTypeEnum.arrayAny }, [WorkflowIOValueTypeEnum.any]: { - label: 'any', + label: 'Any', value: WorkflowIOValueTypeEnum.any }, [WorkflowIOValueTypeEnum.chatHistory]: { diff --git a/packages/global/core/workflow/runtime/type.d.ts b/packages/global/core/workflow/runtime/type.d.ts index 1de03d8c3..29088a77e 100644 --- a/packages/global/core/workflow/runtime/type.d.ts +++ b/packages/global/core/workflow/runtime/type.d.ts @@ -216,5 +216,7 @@ export type AIChatNodeProps = { [NodeInputKeyEnum.aiChatQuoteTemplate]?: string; [NodeInputKeyEnum.aiChatQuotePrompt]?: string; [NodeInputKeyEnum.aiChatVision]?: boolean; + [NodeInputKeyEnum.stringQuoteText]?: string; + [NodeInputKeyEnum.fileUrlList]?: string[]; }; diff --git a/packages/global/core/workflow/template/input.ts b/packages/global/core/workflow/template/input.ts index 49a0788a4..3cccc3155 100644 --- a/packages/global/core/workflow/template/input.ts +++ b/packages/global/core/workflow/template/input.ts @@ -75,10 +75,17 @@ export const Input_Template_Text_Quote: FlowNodeInputItemType = { description: i18nT('app:document_quote_tip'), valueType: WorkflowIOValueTypeEnum.string }; + +export const Input_Template_File_Link_Prompt: FlowNodeInputItemType = { + key: NodeInputKeyEnum.fileUrlList, + renderTypeList: [FlowNodeInputTypeEnum.reference, FlowNodeInputTypeEnum.input], + label: i18nT('app:file_quote_link'), + debugLabel: i18nT('app:file_quote_link'), + valueType: WorkflowIOValueTypeEnum.arrayString +}; export const Input_Template_File_Link: FlowNodeInputItemType = { key: NodeInputKeyEnum.fileUrlList, renderTypeList: [FlowNodeInputTypeEnum.reference], - required: true, label: i18nT('app:workflow.user_file_input'), debugLabel: i18nT('app:workflow.user_file_input'), description: i18nT('app:workflow.user_file_input_desc'), diff --git a/packages/global/core/workflow/template/system/aiChat/index.ts b/packages/global/core/workflow/template/system/aiChat/index.ts index 0bc4e19ab..249210dc6 100644 --- a/packages/global/core/workflow/template/system/aiChat/index.ts +++ b/packages/global/core/workflow/template/system/aiChat/index.ts @@ -17,7 +17,8 @@ import { Input_Template_History, Input_Template_System_Prompt, Input_Template_UserChatInput, - Input_Template_Text_Quote + Input_Template_Text_Quote, + Input_Template_File_Link_Prompt } from '../../input'; import { chatNodeSystemPromptTip, systemPromptTip } from '../../tip'; import { getHandleConfig } from '../../utils'; @@ -55,7 +56,7 @@ export const AiChatModule: FlowNodeTemplateType = { showStatus: true, isTool: true, courseUrl: '/docs/workflow/modules/ai_chat/', - version: '481', + version: '4813', inputs: [ Input_Template_SettingAiModel, // --- settings modal @@ -100,7 +101,7 @@ export const AiChatModule: FlowNodeTemplateType = { }, Input_Template_History, Input_Template_Dataset_Quote, - Input_Template_Text_Quote, + Input_Template_File_Link_Prompt, { ...Input_Template_UserChatInput, toolDescription: i18nT('workflow:user_question') } ], diff --git 
a/packages/global/core/workflow/template/system/readFiles/index.tsx b/packages/global/core/workflow/template/system/readFiles/index.tsx index 8018eda74..e4485fe97 100644 --- a/packages/global/core/workflow/template/system/readFiles/index.tsx +++ b/packages/global/core/workflow/template/system/readFiles/index.tsx @@ -23,7 +23,7 @@ export const ReadFilesNode: FlowNodeTemplateType = { name: i18nT('app:workflow.read_files'), intro: i18nT('app:workflow.read_files_tip'), showStatus: true, - version: '489', + version: '4812', isTool: true, inputs: [ { diff --git a/packages/global/core/workflow/template/system/tools.ts b/packages/global/core/workflow/template/system/tools.ts index bd91596d7..7526214f8 100644 --- a/packages/global/core/workflow/template/system/tools.ts +++ b/packages/global/core/workflow/template/system/tools.ts @@ -20,6 +20,7 @@ import { chatNodeSystemPromptTip, systemPromptTip } from '../tip'; import { LLMModelTypeEnum } from '../../../ai/constants'; import { getHandleConfig } from '../utils'; import { i18nT } from '../../../../../web/i18n/utils'; +import { Input_Template_File_Link_Prompt } from '../input'; export const ToolModule: FlowNodeTemplateType = { id: FlowNodeTypeEnum.tools, @@ -32,7 +33,7 @@ export const ToolModule: FlowNodeTemplateType = { intro: i18nT('workflow:template.tool_call_intro'), showStatus: true, courseUrl: '/docs/workflow/modules/tool/', - version: '481', + version: '4813', inputs: [ { ...Input_Template_SettingAiModel, @@ -57,7 +58,7 @@ export const ToolModule: FlowNodeTemplateType = { renderTypeList: [FlowNodeInputTypeEnum.hidden], label: '', valueType: WorkflowIOValueTypeEnum.boolean, - value: true + value: false }, { @@ -67,6 +68,7 @@ export const ToolModule: FlowNodeTemplateType = { placeholder: chatNodeSystemPromptTip }, Input_Template_History, + Input_Template_File_Link_Prompt, Input_Template_UserChatInput ], outputs: [ diff --git a/packages/global/core/workflow/type/io.d.ts b/packages/global/core/workflow/type/io.d.ts index eb46e6dda..9e5699416 100644 --- a/packages/global/core/workflow/type/io.d.ts +++ b/packages/global/core/workflow/type/io.d.ts @@ -56,6 +56,11 @@ export type FlowNodeInputItemType = InputComponentPropsType & { canEdit?: boolean; // dynamic inputs isPro?: boolean; // Pro version field isToolOutput?: boolean; + + // file + canSelectFile?: boolean; + canSelectImg?: boolean; + maxFiles?: number; }; export type FlowNodeOutputItemType = { diff --git a/packages/global/core/workflow/utils.ts b/packages/global/core/workflow/utils.ts index 6fbb98138..4b5cc0e43 100644 --- a/packages/global/core/workflow/utils.ts +++ b/packages/global/core/workflow/utils.ts @@ -32,6 +32,7 @@ import { IfElseResultEnum } from './template/system/ifElse/constant'; import { RuntimeNodeItemType } from './runtime/type'; import { getReferenceVariableValue } from './runtime/utils'; import { + Input_Template_File_Link, Input_Template_History, Input_Template_Stream_MODE, Input_Template_UserChatInput @@ -261,8 +262,10 @@ export const appData2FlowNodeIO = ({ inputs: [ Input_Template_Stream_MODE, Input_Template_History, + ...(chatConfig?.fileSelectConfig?.canSelectFile || chatConfig?.fileSelectConfig?.canSelectImg + ? [Input_Template_File_Link] + : []), Input_Template_UserChatInput, - // ...(showFileLink ? 
[Input_Template_File_Link] : []), ...variableInput ], outputs: [ diff --git a/packages/service/core/chat/utils.ts b/packages/service/core/chat/utils.ts index 56b74aa47..d6097d55d 100644 --- a/packages/service/core/chat/utils.ts +++ b/packages/service/core/chat/utils.ts @@ -109,7 +109,7 @@ export const loadRequestMessages = async ({ } return Promise.all( messages.map(async (item) => { - if (item.type === 'image_url') { + if (item.type === 'image_url' && process.env.MULTIPLE_DATA_TO_BASE64 === 'true') { // Remove url origin const imgUrl = (() => { if (origin && item.image_url.url.startsWith(origin)) { @@ -149,7 +149,7 @@ export const loadRequestMessages = async ({ }; // Split question text and image const parseStringWithImages = (input: string): ChatCompletionContentPart[] => { - if (!useVision) { + if (!useVision || input.length > 500) { return [{ type: 'text', text: input || '' }]; } @@ -170,8 +170,8 @@ export const loadRequestMessages = async ({ }); }); - // Too many images or too long text, return text - if (httpsImages.length > 4 || input.length > 1000) { + // Too many images return text + if (httpsImages.length > 4) { return [{ type: 'text', text: input || '' }]; } @@ -179,7 +179,7 @@ export const loadRequestMessages = async ({ result.push({ type: 'text', text: input }); return result; }; - // Parse user content(text and img) + // Parse user content(text and img) Store history => api messages const parseUserContent = async (content: string | ChatCompletionContentPart[]) => { if (typeof content === 'string') { return loadImageToBase64(parseStringWithImages(content)); diff --git a/packages/service/core/workflow/dispatch/agent/runTool/index.ts b/packages/service/core/workflow/dispatch/agent/runTool/index.ts index 74bfa25e0..7463ba583 100644 --- a/packages/service/core/workflow/dispatch/agent/runTool/index.ts +++ b/packages/service/core/workflow/dispatch/agent/runTool/index.ts @@ -25,45 +25,16 @@ import { replaceVariable } from '@fastgpt/global/common/string/tools'; import { getMultiplePrompt, Prompt_Tool_Call } from './constants'; import { filterToolResponseToPreview } from './utils'; import { InteractiveNodeResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type'; +import { getFileContentFromLinks, getHistoryFileLinks } from '../../tools/readFiles'; +import { parseUrlToFileType } from '@fastgpt/global/common/file/tools'; +import { Prompt_DocumentQuote } from '@fastgpt/global/core/ai/prompt/AIChat'; +import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant'; type Response = DispatchNodeResultType<{ [NodeOutputKeyEnum.answerText]: string; [DispatchNodeResponseKeyEnum.interactive]?: InteractiveNodeResponseType; }>; -/* - Tool call, auth add file prompt to question。 - Guide the LLM to call tool. 
-*/ -export const toolCallMessagesAdapt = ({ - userInput -}: { - userInput: UserChatItemValueItemType[]; -}) => { - const files = userInput.filter((item) => item.type === 'file'); - - if (files.length > 0) { - return userInput.map((item) => { - if (item.type === 'text') { - const filesCount = files.filter((file) => file.file?.type === 'file').length; - const imgCount = files.filter((file) => file.file?.type === 'image').length; - const text = item.text?.content || ''; - - return { - ...item, - text: { - content: getMultiplePrompt({ fileCount: filesCount, imgCount, question: text }) - } - }; - } - - return item; - }); - } - - return userInput; -}; - export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise => { const { node: { nodeId, name, isEntry }, @@ -71,11 +42,21 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise< runtimeEdges, histories, query, - - params: { model, systemPrompt, userChatInput, history = 6 } + requestOrigin, + chatConfig, + runningAppInfo: { teamId }, + params: { + model, + systemPrompt, + userChatInput, + history = 6, + fileUrlList: fileLinks, + aiChatVision + } } = props; const toolModel = getLLMModel(model); + const useVision = aiChatVision && toolModel.vision; const chatHistories = getHistories(history, histories); const toolNodeIds = filterToolNodeIdByEdges({ nodeId, edges: runtimeEdges }); @@ -109,18 +90,43 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise< } })(); props.node.isEntry = false; + const hasReadFilesTool = toolNodes.some( + (item) => item.flowNodeType === FlowNodeTypeEnum.readFiles + ); + + const globalFiles = chatValue2RuntimePrompt(query).files; + const { documentQuoteText, userFiles } = await getMultiInput({ + histories, + requestOrigin, + maxFiles: chatConfig?.fileSelectConfig?.maxFiles || 20, + teamId, + fileLinks, + inputFiles: globalFiles + }); + + const concatenateSystemPrompt = [ + toolModel.defaultSystemChatPrompt, + systemPrompt, + documentQuoteText + ? 
replaceVariable(Prompt_DocumentQuote, { + quote: documentQuoteText + }) + : '' + ] + .filter(Boolean) + .join('\n\n===---===---===\n\n'); const messages: ChatItemType[] = (() => { const value: ChatItemType[] = [ - ...getSystemPrompt_ChatItemType(toolModel.defaultSystemChatPrompt), - ...getSystemPrompt_ChatItemType(systemPrompt), + ...getSystemPrompt_ChatItemType(concatenateSystemPrompt), // Add file input prompt to histories ...chatHistories.map((item) => { if (item.obj === ChatRoleEnum.Human) { return { ...item, value: toolCallMessagesAdapt({ - userInput: item.value + userInput: item.value, + skip: !hasReadFilesTool }) }; } @@ -129,9 +135,10 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise< { obj: ChatRoleEnum.Human, value: toolCallMessagesAdapt({ + skip: !hasReadFilesTool, userInput: runtimePrompt2ChatsValue({ text: userChatInput, - files: chatValue2RuntimePrompt(query).files + files: userFiles }) }) } @@ -237,7 +244,11 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise< childTotalPoints: flatUsages.reduce((sum, item) => sum + item.totalPoints, 0), model: modelName, query: userChatInput, - historyPreview: getHistoryPreview(GPTMessages2Chats(completeMessages, false), 10000), + historyPreview: getHistoryPreview( + GPTMessages2Chats(completeMessages, false), + 10000, + useVision + ), toolDetail: childToolResponse, mergeSignId: nodeId }, @@ -253,3 +264,88 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise< [DispatchNodeResponseKeyEnum.interactive]: toolWorkflowInteractiveResponse }; }; + +const getMultiInput = async ({ + histories, + fileLinks, + requestOrigin, + maxFiles, + teamId, + inputFiles +}: { + histories: ChatItemType[]; + fileLinks?: string[]; + requestOrigin?: string; + maxFiles: number; + teamId: string; + inputFiles: UserChatItemValueItemType['file'][]; +}) => { + // Not file quote + if (!fileLinks) { + return { + documentQuoteText: '', + userFiles: inputFiles + }; + } + + const filesFromHistories = getHistoryFileLinks(histories); + const urls = [...fileLinks, ...filesFromHistories]; + + if (urls.length === 0) { + return { + documentQuoteText: '', + userFiles: [] + }; + } + + // Get files from histories + const { text } = await getFileContentFromLinks({ + // Concat fileUrlList and filesFromHistories; remove not supported files + urls, + requestOrigin, + maxFiles, + teamId + }); + + return { + documentQuoteText: text, + userFiles: fileLinks.map((url) => parseUrlToFileType(url)) + }; +}; + +/* +Tool call, auth add file prompt to question。 +Guide the LLM to call tool. 
+*/ +const toolCallMessagesAdapt = ({ + userInput, + skip +}: { + userInput: UserChatItemValueItemType[]; + skip?: boolean; +}) => { + if (skip) return userInput; + + const files = userInput.filter((item) => item.type === 'file'); + + if (files.length > 0) { + return userInput.map((item) => { + if (item.type === 'text') { + const filesCount = files.filter((file) => file.file?.type === 'file').length; + const imgCount = files.filter((file) => file.file?.type === 'image').length; + const text = item.text?.content || ''; + + return { + ...item, + text: { + content: getMultiplePrompt({ fileCount: filesCount, imgCount, question: text }) + } + }; + } + + return item; + }); + } + + return userInput; +}; diff --git a/packages/service/core/workflow/dispatch/agent/runTool/type.d.ts b/packages/service/core/workflow/dispatch/agent/runTool/type.d.ts index 418be1aa4..ad2866c13 100644 --- a/packages/service/core/workflow/dispatch/agent/runTool/type.d.ts +++ b/packages/service/core/workflow/dispatch/agent/runTool/type.d.ts @@ -21,6 +21,7 @@ export type DispatchToolModuleProps = ModuleDispatchProps<{ [NodeInputKeyEnum.aiChatTemperature]: number; [NodeInputKeyEnum.aiChatMaxToken]: number; [NodeInputKeyEnum.aiChatVision]?: boolean; + [NodeInputKeyEnum.fileUrlList]?: string[]; }> & { messages: ChatCompletionMessageParam[]; toolNodes: ToolNodeItemType[]; diff --git a/packages/service/core/workflow/dispatch/chat/oneapi.ts b/packages/service/core/workflow/dispatch/chat/oneapi.ts index 2d28bf769..3605f1a29 100644 --- a/packages/service/core/workflow/dispatch/chat/oneapi.ts +++ b/packages/service/core/workflow/dispatch/chat/oneapi.ts @@ -46,6 +46,8 @@ import { WorkflowResponseType } from '../type'; import { formatTime2YMDHM } from '@fastgpt/global/common/string/time'; import { AiChatQuoteRoleType } from '@fastgpt/global/core/workflow/template/system/aiChat/type'; import { getErrText } from '@fastgpt/global/common/error/utils'; +import { getFileContentFromLinks, getHistoryFileLinks } from '../tools/readFiles'; +import { parseUrlToFileType } from '@fastgpt/global/common/file/tools'; export type ChatProps = ModuleDispatchProps< AIChatNodeProps & { @@ -69,7 +71,9 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise { // censor model and system key @@ -132,22 +148,9 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise + if (stringQuoteText) { + return { + documentQuoteText: stringQuoteText, + userFiles: inputFiles + }; + } + + // 没有引用文件参考,但是可能用了图片识别 + if (!fileLinks) { + return { + documentQuoteText: '', + userFiles: inputFiles + }; + } + // 旧版本适配<==== + + // If fileLinks params is not empty, it means it is a new version, not get the global file. 
+ + // Get files from histories + const filesFromHistories = getHistoryFileLinks(histories); + const urls = [...fileLinks, ...filesFromHistories]; + + if (urls.length === 0) { + return { + documentQuoteText: '', + userFiles: [] + }; + } + + const { text } = await getFileContentFromLinks({ + // Concat fileUrlList and filesFromHistories; remove not supported files + urls, + requestOrigin, + maxFiles, + teamId + }); + + return { + documentQuoteText: text, + userFiles: fileLinks.map((url) => parseUrlToFileType(url)) + }; +} + async function getChatMessages({ + model, aiChatQuoteRole, datasetQuotePrompt = '', datasetQuoteText, @@ -310,10 +380,10 @@ async function getChatMessages({ histories = [], systemPrompt, userChatInput, - inputFiles, - model, - stringQuoteText + userFiles, + documentQuoteText }: { + model: LLMModelItemType; // dataset quote aiChatQuoteRole: AiChatQuoteRoleType; // user: replace user prompt; system: replace system prompt datasetQuotePrompt?: string; @@ -323,10 +393,11 @@ async function getChatMessages({ histories: ChatItemType[]; systemPrompt: string; userChatInput: string; - inputFiles: UserChatItemValueItemType['file'][]; - model: LLMModelItemType; - stringQuoteText?: string; // file quote + + userFiles: UserChatItemValueItemType['file'][]; + documentQuoteText?: string; // document quote }) { + // Dataset prompt ====> // User role or prompt include question const quoteRole = aiChatQuoteRole === 'user' || datasetQuotePrompt.includes('{{question}}') ? 'user' : 'system'; @@ -337,6 +408,7 @@ async function getChatMessages({ ? Prompt_userQuotePromptList[0].value : Prompt_systemQuotePromptList[0].value; + // Reset user input, add dataset quote to user input const replaceInputValue = useDatasetQuote && quoteRole === 'user' ? replaceVariable(datasetQuotePromptTemplate, { @@ -344,31 +416,33 @@ async function getChatMessages({ question: userChatInput }) : userChatInput; + // Dataset prompt <==== - const replaceSystemPrompt = + // Concat system prompt + const concatenateSystemPrompt = [ + model.defaultSystemChatPrompt, + systemPrompt, useDatasetQuote && quoteRole === 'system' - ? `${systemPrompt ? systemPrompt + '\n\n------\n\n' : ''}${replaceVariable( - datasetQuotePromptTemplate, - { - quote: datasetQuoteText - } - )}` - : systemPrompt; + ? replaceVariable(datasetQuotePromptTemplate, { + quote: datasetQuoteText + }) + : '', + documentQuoteText + ? replaceVariable(Prompt_DocumentQuote, { + quote: documentQuoteText + }) + : '' + ] + .filter(Boolean) + .join('\n\n===---===---===\n\n'); const messages: ChatItemType[] = [ - ...getSystemPrompt_ChatItemType(replaceSystemPrompt), - ...(stringQuoteText // file quote - ? 
getSystemPrompt_ChatItemType( - replaceVariable(Prompt_DocumentQuote, { - quote: stringQuoteText - }) - ) - : []), + ...getSystemPrompt_ChatItemType(concatenateSystemPrompt), ...histories, { obj: ChatRoleEnum.Human, value: runtimePrompt2ChatsValue({ - files: inputFiles, + files: userFiles, text: replaceInputValue }) } diff --git a/packages/service/core/workflow/dispatch/plugin/runApp.ts b/packages/service/core/workflow/dispatch/plugin/runApp.ts index e4f31c6b4..6aa2e0804 100644 --- a/packages/service/core/workflow/dispatch/plugin/runApp.ts +++ b/packages/service/core/workflow/dispatch/plugin/runApp.ts @@ -17,12 +17,14 @@ import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/ty import { authAppByTmbId } from '../../../../support/permission/app/auth'; import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant'; import { getAppVersionById } from '../../../app/version/controller'; +import { parseUrlToFileType } from '@fastgpt/global/common/file/tools'; type Props = ModuleDispatchProps<{ [NodeInputKeyEnum.userChatInput]: string; [NodeInputKeyEnum.history]?: ChatItemType[] | number; [NodeInputKeyEnum.fileUrlList]?: string[]; [NodeInputKeyEnum.forbidStream]?: boolean; + [NodeInputKeyEnum.fileUrlList]?: string[]; }>; type Response = DispatchNodeResultType<{ [NodeOutputKeyEnum.answerText]: string; @@ -40,8 +42,24 @@ export const dispatchRunAppNode = async (props: Props): Promise => { variables } = props; - const { system_forbid_stream = false, userChatInput, history, ...childrenAppVariables } = params; - if (!userChatInput) { + const { + system_forbid_stream = false, + userChatInput, + history, + fileUrlList, + ...childrenAppVariables + } = params; + const { files } = chatValue2RuntimePrompt(query); + + const userInputFiles = (() => { + if (fileUrlList) { + return fileUrlList.map((url) => parseUrlToFileType(url)); + } + // Adapt version 4.8.13 upgrade + return files; + })(); + + if (!userChatInput && !userInputFiles) { return Promise.reject('Input is empty'); } if (!appId) { @@ -72,7 +90,6 @@ export const dispatchRunAppNode = async (props: Props): Promise => { } const chatHistories = getHistories(history, histories); - const { files } = chatValue2RuntimePrompt(query); // Rewrite children app variables const systemVariables = filterSystemVariables(variables); @@ -102,7 +119,7 @@ export const dispatchRunAppNode = async (props: Props): Promise => { histories: chatHistories, variables: childrenRunVariables, query: runtimePrompt2ChatsValue({ - files, + files: userInputFiles, text: userChatInput }), chatConfig diff --git a/packages/service/core/workflow/dispatch/plugin/runInput.ts b/packages/service/core/workflow/dispatch/plugin/runInput.ts index 6760d8fc3..d82727ea2 100644 --- a/packages/service/core/workflow/dispatch/plugin/runInput.ts +++ b/packages/service/core/workflow/dispatch/plugin/runInput.ts @@ -1,4 +1,5 @@ import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt'; +import { ChatFileTypeEnum } from '@fastgpt/global/core/chat/constants'; import { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants'; import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants'; import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type'; @@ -11,6 +12,24 @@ export const dispatchPluginInput = (props: PluginInputProps) => { const { params, query } = props; const { files } = chatValue2RuntimePrompt(query); + /* + 对 params 中文件类型数据进行处理 + * 插件单独运行时,这里会是一个特殊的数组 + * 插件调用的话,这个参数是一个 string[] 
不会进行处理 + * 硬性要求:API 单独调用插件时,要避免这种特殊类型冲突 + */ + for (const key in params) { + const val = params[key]; + if ( + Array.isArray(val) && + val.every( + (item) => item.type === ChatFileTypeEnum.file || item.type === ChatFileTypeEnum.image + ) + ) { + params[key] = val.map((item) => item.url); + } + } + return { ...params, [DispatchNodeResponseKeyEnum.nodeResponse]: {}, diff --git a/packages/service/core/workflow/dispatch/tools/readFiles.ts b/packages/service/core/workflow/dispatch/tools/readFiles.ts index 31f8db7e9..777d267ce 100644 --- a/packages/service/core/workflow/dispatch/tools/readFiles.ts +++ b/packages/service/core/workflow/dispatch/tools/readFiles.ts @@ -2,16 +2,15 @@ import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runti import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type'; import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants'; import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type'; -import { documentFileType } from '@fastgpt/global/common/file/constants'; import axios from 'axios'; import { serverRequestBaseUrl } from '../../../../common/api/serverRequest'; import { MongoRawTextBuffer } from '../../../../common/buffer/rawText/schema'; import { readFromSecondary } from '../../../../common/mongo/utils'; import { getErrText } from '@fastgpt/global/common/error/utils'; -import { detectFileEncoding } from '@fastgpt/global/common/file/tools'; +import { detectFileEncoding, parseUrlToFileType } from '@fastgpt/global/common/file/tools'; import { readRawContentByFileBuffer } from '../../../../common/file/read/utils'; import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants'; -import { UserChatItemValueItemType } from '@fastgpt/global/core/chat/type'; +import { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type'; import { parseFileExtensionFromUrl } from '@fastgpt/global/common/string/tools'; type Props = ModuleDispatchProps<{ @@ -48,12 +47,41 @@ export const dispatchReadFiles = async (props: Props): Promise => { runningAppInfo: { teamId }, histories, chatConfig, + node: { version }, params: { fileUrlList = [] } } = props; const maxFiles = chatConfig?.fileSelectConfig?.maxFiles || 20; // Get files from histories - const filesFromHistories = histories + const filesFromHistories = version !== '489' ? [] : getHistoryFileLinks(histories); + + const { text, readFilesResult } = await getFileContentFromLinks({ + // Concat fileUrlList and filesFromHistories; remove not supported files + urls: [...fileUrlList, ...filesFromHistories], + requestOrigin, + maxFiles, + teamId + }); + + return { + [NodeOutputKeyEnum.text]: text, + [DispatchNodeResponseKeyEnum.nodeResponse]: { + readFiles: readFilesResult.map((item) => ({ + name: item?.filename || '', + url: item?.url || '' + })), + readFilesResult: readFilesResult + .map((item) => item?.nodeResponsePreviewText ?? 
'') + .join('\n******\n') + }, + [DispatchNodeResponseKeyEnum.toolResponses]: { + fileContent: text + } + }; +}; + +export const getHistoryFileLinks = (histories: ChatItemType[]) => { + return histories .filter((item) => { if (item.obj === ChatRoleEnum.Human) { return item.value.filter((value) => value.type === 'file'); @@ -70,26 +98,27 @@ export const dispatchReadFiles = async (props: Props): Promise => { return files; }) .flat(); +}; - // Concat fileUrlList and filesFromHistories; remove not supported files - const parseUrlList = [...fileUrlList, ...filesFromHistories] +export const getFileContentFromLinks = async ({ + urls, + requestOrigin, + maxFiles, + teamId +}: { + urls: string[]; + requestOrigin?: string; + maxFiles: number; + teamId: string; +}) => { + const parseUrlList = urls .map((url) => { try { - // Avoid "/api/xxx" file error. - const origin = requestOrigin ?? 'http://localhost:3000'; - // Check is system upload file if (url.startsWith('/') || (requestOrigin && url.startsWith(requestOrigin))) { // Parse url, get filename query. Keep only documents that can be parsed - const parseUrl = new URL(url, origin); - const filenameQuery = parseUrl.searchParams.get('filename'); - - // Not document - if (filenameQuery) { - const extensionQuery = filenameQuery.split('.').pop()?.toLowerCase() || ''; - if (!documentFileType.includes(extensionQuery)) { - return ''; - } + if (parseUrlToFileType(url)?.type !== 'file') { + return ''; } // Remove the origin(Make intranet requests directly) @@ -197,18 +226,7 @@ export const dispatchReadFiles = async (props: Props): Promise => { const text = readFilesResult.map((item) => item?.text ?? '').join('\n******\n'); return { - [NodeOutputKeyEnum.text]: text, - [DispatchNodeResponseKeyEnum.nodeResponse]: { - readFiles: readFilesResult.map((item) => ({ - name: item?.filename || '', - url: item?.url || '' - })), - readFilesResult: readFilesResult - .map((item) => item?.nodeResponsePreviewText ?? '') - .join('\n******\n') - }, - [DispatchNodeResponseKeyEnum.toolResponses]: { - fileContent: text - } + text, + readFilesResult }; }; diff --git a/packages/service/core/workflow/utils.ts b/packages/service/core/workflow/utils.ts index 327cae656..6e5ec8cfb 100644 --- a/packages/service/core/workflow/utils.ts +++ b/packages/service/core/workflow/utils.ts @@ -1,14 +1,5 @@ import { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type'; import { countPromptTokens } from '../../common/string/tiktoken/index'; -import { getNanoid } from '@fastgpt/global/common/string/tools'; -import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants'; -import { - getPluginInputsFromStoreNodes, - getPluginRunContent -} from '@fastgpt/global/core/app/plugin/utils'; -import { StoreNodeItemType } from '@fastgpt/global/core/workflow/type/node'; -import { RuntimeUserPromptType, UserChatItemType } from '@fastgpt/global/core/chat/type'; -import { runtimePrompt2ChatsValue } from '@fastgpt/global/core/chat/adapt'; /* filter search result */ export const filterSearchResultsByMaxChars = async ( diff --git a/packages/web/i18n/en/app.json b/packages/web/i18n/en/app.json index a72e84fa0..f138befb3 100644 --- a/packages/web/i18n/en/app.json +++ b/packages/web/i18n/en/app.json @@ -40,6 +40,7 @@ "export_config_successful": "Configuration copied, some sensitive information automatically filtered. 
Please check for any remaining sensitive data.", "export_configs": "Export Configurations", "feedback_count": "User Feedback", + "file_quote_link": "Files", "file_recover": "File will overwrite current content", "file_upload": "File Upload", "file_upload_tip": "Once enabled, documents/images can be uploaded. Documents are retained for 7 days, images for 15 days. Using this feature may incur additional costs. To ensure a good experience, please choose an AI model with a larger context length when using this feature.", @@ -47,7 +48,7 @@ "go_to_chat": "Go to Conversation", "go_to_run": "Go to Execution", "image_upload": "Image Upload", - "image_upload_tip": "Please ensure to select a vision model that can process images.", + "image_upload_tip": "How to activate model image recognition capabilities", "import_configs": "Import Configurations", "import_configs_failed": "Import configuration failed, please ensure the configuration is correct!", "import_configs_success": "Import Successful", @@ -61,7 +62,7 @@ "intro": "A comprehensive model application orchestration system that offers out-of-the-box data processing and model invocation capabilities. It allows for rapid Dataset construction and workflow orchestration through Flow visualization, enabling complex Dataset scenarios!", "llm_not_support_vision": "This model does not support image recognition", "llm_use_vision": "Enable Image Recognition", - "llm_use_vision_tip": "Once image recognition is enabled, this model will automatically receive images uploaded from the 'dialog box' and image links in 'user questions'.", + "llm_use_vision_tip": "After clicking on the model selection, you can see whether the model supports image recognition and the ability to control whether to start image recognition. \nAfter starting image recognition, the model will read the image content in the file link, and if the user question is less than 500 words, it will automatically parse the image in the user question.", "logs_chat_user": "user", "logs_empty": "No logs yet~", "logs_message_total": "Total Messages", @@ -74,6 +75,7 @@ "month.unit": "Day", "move_app": "Move Application", "not_json_file": "Please select a JSON file", + "open_vision_function_tip": "Models with icon switches have image recognition capabilities. 
\nAfter being turned on, the model will parse the pictures in the file link and automatically parse the pictures in the user's question (user question ≤ 500 words).", "or_drag_JSON": "or drag in JSON file", "paste_config": "Paste Configuration", "permission.des.manage": "Based on write permissions, you can configure publishing channels, view conversation logs, and assign permissions to the application.", @@ -135,7 +137,7 @@ "version_back": "Revert to Original State", "version_copy": "Duplicate", "version_initial_copy": "Duplicate - Original State", - "vision_model_title": "Enable Image Recognition", + "vision_model_title": "Image recognition ability", "week.Friday": "Friday", "week.Monday": "Monday", "week.Saturday": "Saturday", diff --git a/packages/web/i18n/en/chat.json b/packages/web/i18n/en/chat.json index 6c5c7c5e7..7a419671d 100644 --- a/packages/web/i18n/en/chat.json +++ b/packages/web/i18n/en/chat.json @@ -29,6 +29,7 @@ "multiple_AI_conversations": "Multiple AI Conversations", "new_input_guide_lexicon": "New Lexicon", "no_workflow_response": "No workflow data", + "not_select_file": "No file selected", "plugins_output": "Plugin Output", "question_tip": "From top to bottom, the response order of each module", "response.node_inputs": "Node Inputs", @@ -40,4 +41,4 @@ "upload": "Upload", "view_citations": "View References", "web_site_sync": "Web Site Sync" -} \ No newline at end of file +} diff --git a/packages/web/i18n/en/workflow.json b/packages/web/i18n/en/workflow.json index 48ded9c3c..4b54ff34f 100644 --- a/packages/web/i18n/en/workflow.json +++ b/packages/web/i18n/en/workflow.json @@ -122,6 +122,8 @@ "pass_returned_object_as_output_to_next_nodes": "Pass the object returned in the code as output to the next nodes. The variable name needs to correspond to the return key.", "plugin.Instruction_Tip": "You can configure an instruction to explain the purpose of the plugin. This instruction will be displayed each time the plugin is used. Supports standard Markdown syntax.", "plugin.Instructions": "Instructions", + "plugin.global_file_input": "File links (deprecated)", + "plugin_file_abandon_tip": "Plugin global file upload has been deprecated, please adjust it as soon as possible. \nRelated functions can be achieved through plug-in input and adding image type input.", "plugin_input": "Plugin Input", "plugin_output_tool": "When the plug-in is executed as a tool, whether this field responds as a result of the tool", "question_classification": "Question Classification", @@ -190,4 +192,4 @@ "workflow.Switch_success": "Switch Successful", "workflow.Team cloud": "Team Cloud", "workflow.exit_tips": "Your changes have not been saved. 'Exit directly' will not save your edits." 
-} \ No newline at end of file +} diff --git a/packages/web/i18n/zh/app.json b/packages/web/i18n/zh/app.json index 1e1e52100..59d062c62 100644 --- a/packages/web/i18n/zh/app.json +++ b/packages/web/i18n/zh/app.json @@ -40,6 +40,7 @@ "export_config_successful": "已复制配置,自动过滤部分敏感信息,请注意检查是否仍有敏感数据", "export_configs": "导出配置", "feedback_count": "用户反馈", + "file_quote_link": "文件链接", "file_recover": "文件将覆盖当前内容", "file_upload": "文件上传", "file_upload_tip": "开启后,可以上传文档/图片。文档保留7天,图片保留15天。使用该功能可能产生较多额外费用。为保证使用体验,使用该功能时,请选择上下文长度较大的AI模型。", @@ -47,7 +48,7 @@ "go_to_chat": "去对话", "go_to_run": "去运行", "image_upload": "图片上传", - "image_upload_tip": "请确保选择可处理图片的视觉模型", + "image_upload_tip": "如何启动模型图片识别能力", "import_configs": "导入配置", "import_configs_failed": "导入配置失败,请确保配置正常!", "import_configs_success": "导入成功", @@ -60,8 +61,8 @@ "interval.per_hour": "每小时", "intro": "是一个大模型应用编排系统,提供开箱即用的数据处理、模型调用等能力,可以快速的构建知识库并通过 Flow 可视化进行工作流编排,实现复杂的知识库场景!", "llm_not_support_vision": "该模型不支持图片识别", - "llm_use_vision": "启用图片识别", - "llm_use_vision_tip": "启用图片识别后,该模型会自动接收来自“对话框上传”的图片,以及“用户问题”中的图片链接。", + "llm_use_vision": "图片识别", + "llm_use_vision_tip": "点击模型选择后,可以看到模型是否支持图片识别以及控制是否启动图片识别的能力。启动图片识别后,模型会读取文件链接里图片内容,并且如果用户问题少于 500 字,会自动解析用户问题中的图片。", "logs_chat_user": "使用者", "logs_empty": "还没有日志噢~", "logs_message_total": "消息总数", @@ -72,9 +73,10 @@ "module.type": "\"{{type}}\"类型\n{{description}}", "modules.Title is required": "模块名不能为空", "month.unit": "号", - "move_app": "移动应用", "move.hint": "移动后,所选应用/文件夹将继承新文件夹的权限设置,原先的权限设置失效。", + "move_app": "移动应用", "not_json_file": "请选择JSON文件", + "open_vision_function_tip": "有图示开关的模型即拥有图片识别能力。若开启,模型会解析文件链接里的图片,并自动解析用户问题中的图片(用户问题≤500字时生效)。", "or_drag_JSON": "或拖入JSON文件", "paste_config": "粘贴配置", "permission.des.manage": "写权限基础上,可配置发布渠道、查看对话日志、分配该应用权限", @@ -136,7 +138,7 @@ "version_back": "回到初始状态", "version_copy": "副本", "version_initial_copy": "副本-初始状态", - "vision_model_title": "启用图片识别", + "vision_model_title": "图片识别能力", "week.Friday": "星期五", "week.Monday": "星期一", "week.Saturday": "星期六", diff --git a/packages/web/i18n/zh/chat.json b/packages/web/i18n/zh/chat.json index ef327cfdf..7dea64c4e 100644 --- a/packages/web/i18n/zh/chat.json +++ b/packages/web/i18n/zh/chat.json @@ -29,6 +29,7 @@ "multiple_AI_conversations": "多组 AI 对话", "new_input_guide_lexicon": "新词库", "no_workflow_response": "没有运行数据", + "not_select_file": "未选择文件", "plugins_output": "插件输出", "question_tip": "从上到下,为各个模块的响应顺序", "response.child total points": "子工作流积分消耗", @@ -41,4 +42,4 @@ "upload": "上传", "view_citations": "查看引用", "web_site_sync": "Web站点同步" -} \ No newline at end of file +} diff --git a/packages/web/i18n/zh/workflow.json b/packages/web/i18n/zh/workflow.json index f1b6a116e..e5f964d0d 100644 --- a/packages/web/i18n/zh/workflow.json +++ b/packages/web/i18n/zh/workflow.json @@ -123,6 +123,8 @@ "pass_returned_object_as_output_to_next_nodes": "将代码中 return 的对象作为输出,传递给后续的节点。变量名需要对应 return 的 key", "plugin.Instruction_Tip": "可以配置一段说明,以解释该插件的用途。每次使用插件前,会显示该段说明。支持标准 Markdown 语法。", "plugin.Instructions": "使用说明", + "plugin.global_file_input": "文件链接(弃用)", + "plugin_file_abandon_tip": "插件全局文件上传已弃用,请尽快调整。可以通过插件输入,添加图片类型输入来实现相关功能。", "plugin_input": "插件输入", "plugin_output_tool": "插件作为工具执行时,该字段是否作为工具响应结果", "question_classification": "问题分类", @@ -192,4 +194,4 @@ "workflow.Switch_success": "切换成功", "workflow.Team cloud": "团队云端", "workflow.exit_tips": "您的更改尚未保存,「直接退出」将不会保存您的编辑记录。" -} \ No newline at end of file +} diff --git a/projects/app/.env.template b/projects/app/.env.template index 0bc4cbbc1..c6eba736a 100644 --- a/projects/app/.env.template +++ 
b/projects/app/.env.template @@ -16,6 +16,9 @@ OPENAI_BASE_URL=https://api.openai.com/v1 # 通用key。可以是 openai 的也可以是 oneapi 的。 # 此处逻辑:优先走 ONEAPI_URL,如果填写了 ONEAPI_URL,key 也需要是 ONEAPI 的 key CHAT_API_KEY=sk-xxxx +# 是否将图片转成 base64 传递给模型,本地开发和内网环境使用共有模型时候需要设置为 true +MULTIPLE_DATA_TO_BASE64=true + # mongo 数据库连接参数,本地开发连接远程数据库时,可能需要增加 directConnection=true 参数,才能连接上。 MONGODB_URI=mongodb://username:password@0.0.0.0:27017/fastgpt?authSource=admin diff --git a/projects/app/public/imgs/app/visionModel.png b/projects/app/public/imgs/app/visionModel.png deleted file mode 100644 index 16cc046aa..000000000 Binary files a/projects/app/public/imgs/app/visionModel.png and /dev/null differ diff --git a/projects/app/public/imgs/app/visionModel.svg b/projects/app/public/imgs/app/visionModel.svg new file mode 100644 index 000000000..0f6ba2ed9 --- /dev/null +++ b/projects/app/public/imgs/app/visionModel.svg @@ -0,0 +1,207 @@ + [...207 lines of SVG markup for the new visionModel.svg illustration omitted...] diff --git a/projects/app/src/components/core/app/Tip.tsx b/projects/app/src/components/core/app/Tip.tsx index 7fa9edca7..0924443be 100644 --- a/projects/app/src/components/core/app/Tip.tsx +++ b/projects/app/src/components/core/app/Tip.tsx @@ -58,8 +58,8 @@ const ChatFunctionTip = ({ type }: { type: `${FnTypeEnum}` }) => { [FnTypeEnum.visionModel]: { icon: '/imgs/app/question.svg', title: t('app:vision_model_title'), - desc: t('app:llm_use_vision_tip'), - imgUrl: '/imgs/app/visionModel.png' + desc: t('app:open_vision_function_tip'), + imgUrl: '/imgs/app/visionModel.svg' }, [FnTypeEnum.instruction]: { icon: '/imgs/app/help.svg', diff --git a/projects/app/src/components/core/app/VariableEdit.tsx b/projects/app/src/components/core/app/VariableEdit.tsx index 4745edf84..e7e405737 100644 --- a/projects/app/src/components/core/app/VariableEdit.tsx +++ b/projects/app/src/components/core/app/VariableEdit.tsx @@ -65,10 +65,6 @@ const VariableEdit = ({ const { setValue, reset, watch, getValues } = form; const value = getValues(); const type = watch('type'); - const valueType = watch('valueType'); - const max = watch('max'); - const min = watch('min'); - const defaultValue = watch('defaultValue'); const inputTypeList = useMemo( () => @@ -376,11 +372,7 @@ const VariableEdit = ({ type={'variable'} isEdit={!!value.key} inputType={type} - valueType={valueType} - defaultValue={defaultValue} defaultValueType={defaultValueType} - max={max} - min={min} onClose={() => reset({})} onSubmitSuccess={onSubmitSuccess} onSubmitError={onSubmitError} diff --git a/projects/app/src/components/core/chat/ChatContainer/ChatBox/Input/ChatInput.tsx b/projects/app/src/components/core/chat/ChatContainer/ChatBox/Input/ChatInput.tsx index 35cbfdef8..04f49d12b 100644 --- a/projects/app/src/components/core/chat/ChatContainer/ChatBox/Input/ChatInput.tsx +++ b/projects/app/src/components/core/chat/ChatContainer/ChatBox/Input/ChatInput.tsx @@ -8,7 +8,7 @@ import MyIcon from '@fastgpt/web/components/common/Icon'; import { useRequest2 } from '@fastgpt/web/hooks/useRequest'; import { ChatBoxInputFormType, ChatBoxInputType, SendPromptFnType } from '../type'; import { textareaMinH } from '../constants'; -import { UseFormReturn } from 
'react-hook-form'; +import { useFieldArray, UseFormReturn } from 'react-hook-form'; import { ChatBoxContext } from '../Provider'; import dynamic from 'next/dynamic'; import { useContextSelector } from 'use-context-selector'; @@ -58,6 +58,10 @@ const ChatInput = ({ fileSelectConfig } = useContextSelector(ChatBoxContext, (v) => v); + const fileCtrl = useFieldArray({ + control, + name: 'files' + }); const { File, onOpenSelectFile, @@ -74,7 +78,7 @@ const ChatInput = ({ outLinkAuthData, chatId: chatId || '', fileSelectConfig, - control + fileCtrl }); const havInput = !!inputValue || fileList.length > 0; const hasFileUploading = fileList.some((item) => !item.url); @@ -468,7 +472,7 @@ const ChatInput = ({ {RenderTranslateLoading} {/* file preview */} - + diff --git a/projects/app/src/components/core/chat/ChatContainer/ChatBox/components/VariableInput.tsx b/projects/app/src/components/core/chat/ChatContainer/ChatBox/components/VariableInput.tsx index d33073819..50c077d88 100644 --- a/projects/app/src/components/core/chat/ChatContainer/ChatBox/components/VariableInput.tsx +++ b/projects/app/src/components/core/chat/ChatContainer/ChatBox/components/VariableInput.tsx @@ -64,14 +64,14 @@ export const VariableInputItem = ({ minH={40} maxH={160} bg={'myGray.50'} - {...register(item.key, { + {...register(`variables.${item.key}`, { required: item.required })} /> )} {item.type === VariableInputEnum.textarea && (