4.8 preview (#1288)

* Revert "lafAccount add pat & re request when token invalid (#76)" (#77)

This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be.

* perf: workflow ux

* system config

* Newflow (#89)

* docs: Add doc for Xinference (#1266)

Signed-off-by: Carson Yang <yangchuansheng33@gmail.com>

* Revert "lafAccount add pat & re request when token invalid (#76)" (#77)

This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be.

* perf: workflow ux

* system config

* Revert "lafAccount add pat & re request when token invalid (#76)" (#77)

This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be.

* Revert "lafAccount add pat & re request when token invalid (#76)" (#77)

This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be.

* Revert "lafAccount add pat & re request when token invalid (#76)" (#77)

This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be.

* rename code

* move code

* update flow

* input type selector

* perf: workflow runtime

* feat: node adapt newflow

* feat: adapt plugin

* feat: 360 connection

* check workflow

* perf: flow performance

* change plugin input type (#81)

* change plugin input type

* plugin label mode

* perf: nodecard

* debug

* perf: debug ui

* connection ui

* change workflow ui (#82)

* feat: workflow debug

* adapt openAPI for new workflow (#83)

* adapt openAPI for new workflow

* i18n

* perf: plugin debug

* plugin input ui

* delete

* perf: global variable select

* fix rebase

* perf: workflow performance

* feat: input render type icon

* input icon

* adapt flow (#84)

* adapt newflow

* temp

* temp

* fix

* feat: app schedule trigger

* perf: schedule ui

* feat: isolated-vm run js code

* perf: workflow variable table ui

* feat: adapt simple mode

* feat: adapt input params

* output

* feat: adapt template

* fix: ts

* add if-else module (#86)

* perf: worker

* if else node

* perf: tiktoken worker

* fix: ts

* perf: tiktoken

* fix if-else node (#87)

* fix if-else node

* type

* fix

* perf: audio render

* perf: Parallel worker

* log

* perf: if else node

* adapt plugin

* prompt

* perf: reference ui

* reference ui

* handle ux

* template ui and plugin tool

* adapt v1 workflow

* adapt v1 workflow completions

* perf: time variables

* feat: workflow keyboard shortcuts

* adapt v1 workflow

* update workflow example doc (#88)

* fix: simple mode select tool

---------


* doc

* perf: extract node

* extra node field

* update plugin version

* doc

* variable

* change doc & fix prompt editor (#90)

* fold workflow code

* value type label

---------

Signed-off-by: Carson Yang <yangchuansheng33@gmail.com>
Co-authored-by: Carson Yang <yangchuansheng33@gmail.com>
Co-authored-by: heheer <71265218+newfish-cmyk@users.noreply.github.com>
Commit 439c819ff1 (parent b08d81f887) by Archer, committed by GitHub, 2024-04-25 17:51:20 +08:00
505 changed files with 23570 additions and 18215 deletions

View File

@@ -1,50 +1,39 @@
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { filterGPTMessageByMaxTokens } from '../../../chat/utils';
import {
countGptMessagesTokens,
countMessagesTokens
} from '@fastgpt/global/common/string/tiktoken';
import { countMessagesTokens } from '../../../../common/string/tiktoken/index';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { getAIApi } from '../../../ai/config';
import type { ClassifyQuestionAgentItemType } from '@fastgpt/global/core/module/type.d';
import { ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import type { ClassifyQuestionAgentItemType } from '@fastgpt/global/core/workflow/type/index.d';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type/index.d';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { Prompt_CQJson } from '@fastgpt/global/core/ai/prompt/agent';
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { ModelTypeEnum, getLLMModel } from '../../../ai/model';
import { getHistories } from '../utils';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import {
ChatCompletionCreateParams,
ChatCompletionMessageParam,
ChatCompletionTool
} from '@fastgpt/global/core/ai/type';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import { getHandleId } from '@fastgpt/global/core/workflow/utils';
type Props = ModuleDispatchProps<{
[ModuleInputKeyEnum.aiModel]: string;
[ModuleInputKeyEnum.aiSystemPrompt]?: string;
[ModuleInputKeyEnum.history]?: ChatItemType[] | number;
[ModuleInputKeyEnum.userChatInput]: string;
[ModuleInputKeyEnum.agents]: ClassifyQuestionAgentItemType[];
[NodeInputKeyEnum.aiModel]: string;
[NodeInputKeyEnum.aiSystemPrompt]?: string;
[NodeInputKeyEnum.history]?: ChatItemType[] | number;
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.agents]: ClassifyQuestionAgentItemType[];
}>;
type CQResponse = DispatchNodeResultType<{
[key: string]: any;
[NodeOutputKeyEnum.cqResult]: string;
}>;
type ActionProps = Props & { cqModel: LLMModelItemType };
const agentFunName = 'classify_question';
/* request openai chat */
export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse> => {
const {
user,
module: { name },
node: { nodeId, name },
histories,
params: { model, history = 6, agents, userChatInput }
} = props as Props;
@@ -57,27 +46,11 @@ export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse
const chatHistories = getHistories(history, histories);
const { arg, tokens } = await (async () => {
if (cqModel.toolChoice) {
return toolChoice({
...props,
histories: chatHistories,
cqModel
});
}
if (cqModel.functionCall) {
return functionCall({
...props,
histories: chatHistories,
cqModel
});
}
return completions({
...props,
histories: chatHistories,
cqModel
});
})();
const { arg, tokens } = await completions({
...props,
histories: chatHistories,
cqModel
});
const result = agents.find((item) => item.key === arg?.type) || agents[agents.length - 1];
@@ -88,7 +61,10 @@ export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse
});
return {
[result.key]: true,
[NodeOutputKeyEnum.cqResult]: result.value,
[DispatchNodeResponseKeyEnum.skipHandleId]: agents
.filter((item) => item.key !== arg?.type)
.map((item) => getHandleId(nodeId, 'source', item.key)),
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
@@ -109,164 +85,6 @@ export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse
};
};
const getFunctionCallSchema = ({
cqModel,
histories,
params: { agents, systemPrompt, userChatInput }
}: ActionProps) => {
const messages: ChatItemType[] = [
...histories,
{
obj: ChatRoleEnum.Human,
value: [
{
type: ChatItemValueTypeEnum.text,
text: {
content: systemPrompt
? `<背景知识>
${systemPrompt}
</背景知识>
问题: "${userChatInput}"
`
: userChatInput
}
}
]
}
];
const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
const filterMessages = filterGPTMessageByMaxTokens({
messages: adaptMessages,
maxTokens: cqModel.maxContext
});
// function body
const agentFunction = {
name: agentFunName,
description: '结合对话记录及背景知识,对问题进行分类,并返回对应的类型字段',
parameters: {
type: 'object',
properties: {
type: {
type: 'string',
description: `问题类型。下面是几种可选的问题类型: ${agents
.map((item) => `${item.value},返回:'${item.key}'`)
.join('')}`,
enum: agents.map((item) => item.key)
}
},
required: ['type']
}
};
return {
agentFunction,
filterMessages
};
};
const toolChoice = async (props: ActionProps) => {
const { user, cqModel } = props;
const { agentFunction, filterMessages } = getFunctionCallSchema(props);
// function body
const tools: ChatCompletionTool[] = [
{
type: 'function',
function: agentFunction
}
];
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const response = await ai.chat.completions.create({
model: cqModel.model,
temperature: 0,
messages: filterMessages,
tools,
tool_choice: { type: 'function', function: { name: agentFunName } }
});
try {
const arg = JSON.parse(
response?.choices?.[0]?.message?.tool_calls?.[0]?.function?.arguments || ''
);
const completeMessages: ChatCompletionMessageParam[] = [
...filterMessages,
{
role: ChatCompletionRequestMessageRoleEnum.Assistant,
tool_calls: response.choices?.[0]?.message?.tool_calls
}
];
return {
arg,
tokens: countGptMessagesTokens(completeMessages, tools)
};
} catch (error) {
console.log(response.choices?.[0]?.message);
console.log('Your model may not support tool_call', error);
return {
arg: {},
tokens: 0
};
}
};
const functionCall = async (props: ActionProps) => {
const { user, cqModel } = props;
const { agentFunction, filterMessages } = getFunctionCallSchema(props);
const functions: ChatCompletionCreateParams.Function[] = [agentFunction];
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const response = await ai.chat.completions.create({
model: cqModel.model,
temperature: 0,
messages: filterMessages,
function_call: {
name: agentFunName
},
functions
});
try {
const arg = JSON.parse(response?.choices?.[0]?.message?.function_call?.arguments || '');
const completeMessages: ChatCompletionMessageParam[] = [
...filterMessages,
{
role: ChatCompletionRequestMessageRoleEnum.Assistant,
function_call: response.choices?.[0]?.message?.function_call
}
];
return {
arg,
tokens: countGptMessagesTokens(completeMessages, undefined, functions)
};
} catch (error) {
console.log(response.choices?.[0]?.message);
console.log('Your model may not support tool_call', error);
return {
arg: {},
tokens: 0
};
}
};
const completions = async ({
cqModel,
user,
@@ -283,11 +101,11 @@ const completions = async ({
content: replaceVariable(cqModel.customCQPrompt || Prompt_CQJson, {
systemPrompt: systemPrompt || 'null',
typeList: agents
.map((item) => `{"questionType": "${item.value}", "typeId": "${item.key}"}`)
.join('\n'),
.map((item) => `{"类型ID":"${item.key}", "问题类型":"${item.value}"}`)
.join('------'),
history: histories
.map((item) => `${item.obj}:${chatValue2RuntimePrompt(item.value).text}`)
.join('\n'),
.join('------'),
question: userChatInput
})
}
@@ -309,11 +127,14 @@ const completions = async ({
});
const answer = data.choices?.[0].message?.content || '';
console.log(JSON.stringify(chats2GPTMessages({ messages, reserveId: false }), null, 2));
console.log(answer, '----');
const id =
agents.find((item) => answer.includes(item.key) || answer.includes(item.value))?.key || '';
return {
tokens: countMessagesTokens(messages),
tokens: await countMessagesTokens(messages),
arg: { type: id }
};
};
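
A note on the branch-skipping behavior introduced above: the classify-question node now returns a skipHandleId list so the runtime can prune every agent branch except the selected one. A minimal sketch of that mapping in TypeScript, assuming getHandleId simply composes the node id, handle type, and key (the exact id format is not shown in this excerpt):

type CQAgent = { key: string; value: string };

// Assumed handle-id format, for illustration only.
const getHandleId = (nodeId: string, type: 'source' | 'target', key: string) =>
  `${nodeId}-${type}-${key}`;

// Every agent except the selected one becomes a skipped source handle.
const skippedHandles = (nodeId: string, agents: CQAgent[], selectedKey?: string) =>
  agents
    .filter((item) => item.key !== selectedKey)
    .map((item) => getHandleId(nodeId, 'source', item.key));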

View File

@@ -2,15 +2,15 @@ import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { filterGPTMessageByMaxTokens } from '../../../chat/utils';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import {
countGptMessagesTokens,
countMessagesTokens
} from '@fastgpt/global/common/string/tiktoken';
countMessagesTokens,
countGptMessagesTokens
} from '../../../../common/string/tiktoken/index';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { getAIApi } from '../../../ai/config';
import type { ContextExtractAgentItemType } from '@fastgpt/global/core/module/type';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import type { ContextExtractAgentItemType } from '@fastgpt/global/core/workflow/type/index.d';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type/index.d';
import { Prompt_ExtractJson } from '@fastgpt/global/core/ai/prompt/agent';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
@@ -24,20 +24,20 @@ import {
ChatCompletionTool
} from '@fastgpt/global/core/ai/type';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
type Props = ModuleDispatchProps<{
[ModuleInputKeyEnum.history]?: ChatItemType[];
[ModuleInputKeyEnum.contextExtractInput]: string;
[ModuleInputKeyEnum.extractKeys]: ContextExtractAgentItemType[];
[ModuleInputKeyEnum.description]: string;
[ModuleInputKeyEnum.aiModel]: string;
[NodeInputKeyEnum.history]?: ChatItemType[];
[NodeInputKeyEnum.contextExtractInput]: string;
[NodeInputKeyEnum.extractKeys]: ContextExtractAgentItemType[];
[NodeInputKeyEnum.description]: string;
[NodeInputKeyEnum.aiModel]: string;
}>;
type Response = DispatchNodeResultType<{
[ModuleOutputKeyEnum.success]?: boolean;
[ModuleOutputKeyEnum.failed]?: boolean;
[ModuleOutputKeyEnum.contextExtractFields]: string;
[NodeOutputKeyEnum.success]?: boolean;
[NodeOutputKeyEnum.failed]?: boolean;
[NodeOutputKeyEnum.contextExtractFields]: string;
}>;
type ActionProps = Props & { extractModel: LLMModelItemType };
@@ -47,7 +47,7 @@ const agentFunName = 'request_function';
export async function dispatchContentExtract(props: Props): Promise<Response> {
const {
user,
module: { name },
node: { name },
histories,
params: { content, history = 6, model, description, extractKeys }
} = props;
@@ -119,9 +119,10 @@ export async function dispatchContentExtract(props: Props): Promise<Response> {
});
return {
[ModuleOutputKeyEnum.success]: success ? true : undefined,
[ModuleOutputKeyEnum.failed]: success ? undefined : true,
[ModuleOutputKeyEnum.contextExtractFields]: JSON.stringify(arg),
// [DispatchNodeResponseKeyEnum.skipHandleId]: success
// ? [getHandleId(nodeId, 'source', NodeOutputKeyEnum.failed)]
// : [getHandleId(nodeId, 'source', NodeOutputKeyEnum.success)],
[NodeOutputKeyEnum.contextExtractFields]: JSON.stringify(arg),
...arg,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
@@ -143,7 +144,7 @@ export async function dispatchContentExtract(props: Props): Promise<Response> {
};
}
const getFunctionCallSchema = ({
const getFunctionCallSchema = async ({
extractModel,
histories,
params: { content, extractKeys, description }
@@ -171,7 +172,7 @@ ${description ? `- ${description}` : ''}
}
];
const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
const filterMessages = filterGPTMessageByMaxTokens({
const filterMessages = await filterGPTMessageByMaxTokens({
messages: adaptMessages,
maxTokens: extractModel.maxContext
});
@@ -196,7 +197,8 @@ ${description ? `- ${description}` : ''}
description: '需要执行的函数',
parameters: {
type: 'object',
properties
properties,
required: []
}
};
@@ -209,7 +211,7 @@ ${description ? `- ${description}` : ''}
const toolChoice = async (props: ActionProps) => {
const { user, extractModel } = props;
const { filterMessages, agentFunction } = getFunctionCallSchema(props);
const { filterMessages, agentFunction } = await getFunctionCallSchema(props);
const tools: ChatCompletionTool[] = [
{
@@ -252,7 +254,7 @@ const toolChoice = async (props: ActionProps) => {
}
];
return {
tokens: countGptMessagesTokens(completeMessages, tools),
tokens: await countGptMessagesTokens(completeMessages, tools),
arg
};
};
@@ -260,7 +262,7 @@ const toolChoice = async (props: ActionProps) => {
const functionCall = async (props: ActionProps) => {
const { user, extractModel } = props;
const { agentFunction, filterMessages } = getFunctionCallSchema(props);
const { agentFunction, filterMessages } = await getFunctionCallSchema(props);
const functions: ChatCompletionCreateParams.Function[] = [agentFunction];
const ai = getAIApi({
@@ -290,7 +292,7 @@ const functionCall = async (props: ActionProps) => {
return {
arg,
tokens: countGptMessagesTokens(completeMessages, undefined, functions)
tokens: await countGptMessagesTokens(completeMessages, undefined, functions)
};
} catch (error) {
console.log(response.choices?.[0]?.message);
@@ -355,7 +357,7 @@ Human: ${content}`
if (start === -1 || end === -1) {
return {
rawResponse: answer,
tokens: countMessagesTokens(messages),
tokens: await countMessagesTokens(messages),
arg: {}
};
}
@@ -368,14 +370,14 @@ Human: ${content}`
try {
return {
rawResponse: answer,
tokens: countMessagesTokens(messages),
tokens: await countMessagesTokens(messages),
arg: json5.parse(jsonStr) as Record<string, any>
};
} catch (error) {
console.log(error);
return {
rawResponse: answer,
tokens: countMessagesTokens(messages),
tokens: await countMessagesTokens(messages),
arg: {}
};
}
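
The fallback paths above parse the model's raw answer rather than a structured tool call. A sketch of the tolerant-parse pattern, assuming jsonStr is the outermost brace-delimited slice of the answer (the slicing lines sit outside this excerpt):

import json5 from 'json5';

// Pull the outermost {...} span out of a free-form answer and parse it
// leniently with json5; fall back to an empty object instead of throwing.
function parseExtractAnswer(answer: string): Record<string, any> {
  const start = answer.indexOf('{');
  const end = answer.lastIndexOf('}');
  if (start === -1 || end === -1) return {};
  const jsonStr = answer.slice(start, end + 1);
  try {
    return json5.parse(jsonStr) as Record<string, any>;
  } catch {
    return {};
  }
}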

View File

@@ -16,44 +16,36 @@ import {
responseWriteController,
responseWriteNodeStatus
} from '../../../../../common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { dispatchWorkFlow } from '../../index';
import { DispatchToolModuleProps, RunToolResponse, ToolModuleItemType } from './type.d';
import { DispatchToolModuleProps, RunToolResponse, ToolNodeItemType } from './type.d';
import json5 from 'json5';
import { DispatchFlowResponse } from '../../type';
import { countGptMessagesTokens } from '@fastgpt/global/common/string/tiktoken';
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/index';
import { getNanoid } from '@fastgpt/global/common/string/tools';
import { AIChatItemType, AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { updateToolInputValue } from './utils';
type FunctionRunResponseType = {
moduleRunResponse: DispatchFlowResponse;
toolRunResponse: DispatchFlowResponse;
functionCallMsg: ChatCompletionFunctionMessageParam;
}[];
export const runToolWithFunctionCall = async (
props: DispatchToolModuleProps & {
messages: ChatCompletionMessageParam[];
toolModules: ToolModuleItemType[];
toolNodes: ToolNodeItemType[];
toolModel: LLMModelItemType;
},
response?: RunToolResponse
): Promise<RunToolResponse> => {
const {
toolModel,
toolModules,
messages,
res,
runtimeModules,
detail = false,
module,
stream
} = props;
const { toolModel, toolNodes, messages, res, runtimeNodes, detail = false, node, stream } = props;
const assistantResponses = response?.assistantResponses || [];
const functions: ChatCompletionCreateParams.Function[] = toolModules.map((module) => {
const functions: ChatCompletionCreateParams.Function[] = toolNodes.map((item) => {
const properties: Record<
string,
{
@@ -62,7 +54,7 @@ export const runToolWithFunctionCall = async (
required?: boolean;
}
> = {};
module.toolParams.forEach((item) => {
item.toolParams.forEach((item) => {
properties[item.key] = {
type: 'string',
description: item.toolDescription || ''
@@ -70,17 +62,17 @@ export const runToolWithFunctionCall = async (
});
return {
name: module.moduleId,
description: module.intro,
name: item.nodeId,
description: item.intro,
parameters: {
type: 'object',
properties,
required: module.toolParams.filter((item) => item.required).map((item) => item.key)
required: item.toolParams.filter((item) => item.required).map((item) => item.key)
}
};
});
const filterMessages = filterGPTMessageByMaxTokens({
const filterMessages = await filterGPTMessageByMaxTokens({
messages,
maxTokens: toolModel.maxContext - 500 // filter token. not response maxToken
});
@@ -107,25 +99,25 @@ export const runToolWithFunctionCall = async (
);
const { answer, functionCalls } = await (async () => {
if (stream) {
if (res && stream) {
return streamResponse({
res,
detail,
toolModules,
toolNodes,
stream: aiResponse
});
} else {
const result = aiResponse as ChatCompletion;
const function_call = result.choices?.[0]?.message?.function_call;
const toolModule = toolModules.find((module) => module.moduleId === function_call?.name);
const toolNode = toolNodes.find((node) => node.nodeId === function_call?.name);
const toolCalls = function_call
? [
{
...function_call,
id: getNanoid(),
toolName: toolModule?.name,
toolAvatar: toolModule?.avatar
toolName: toolNode?.name,
toolAvatar: toolNode?.avatar
}
]
: [];
@@ -143,9 +135,9 @@ export const runToolWithFunctionCall = async (
functionCalls.map(async (tool) => {
if (!tool) return;
const toolModule = toolModules.find((module) => module.moduleId === tool.name);
const toolNode = toolNodes.find((node) => node.nodeId === tool.name);
if (!toolModule) return;
if (!toolNode) return;
const startParams = (() => {
try {
@@ -155,21 +147,25 @@ export const runToolWithFunctionCall = async (
}
})();
const moduleRunResponse = await dispatchWorkFlow({
const toolRunResponse = await dispatchWorkFlow({
...props,
runtimeModules: runtimeModules.map((module) => ({
...module,
isEntry: module.moduleId === toolModule.moduleId
})),
startParams
runtimeNodes: runtimeNodes.map((item) =>
item.nodeId === toolNode.nodeId
? {
...item,
isEntry: true,
inputs: updateToolInputValue({ params: startParams, inputs: item.inputs })
}
: item
)
});
const stringToolResponse = (() => {
if (typeof moduleRunResponse.toolResponses === 'object') {
return JSON.stringify(moduleRunResponse.toolResponses, null, 2);
if (typeof toolRunResponse.toolResponses === 'object') {
return JSON.stringify(toolRunResponse.toolResponses, null, 2);
}
return moduleRunResponse.toolResponses ? String(moduleRunResponse.toolResponses) : 'none';
return toolRunResponse.toolResponses ? String(toolRunResponse.toolResponses) : 'none';
})();
const functionCallMsg: ChatCompletionFunctionMessageParam = {
@@ -195,17 +191,17 @@ export const runToolWithFunctionCall = async (
}
return {
moduleRunResponse,
toolRunResponse,
functionCallMsg
};
})
)
).filter(Boolean) as FunctionRunResponseType;
const flatToolsResponseData = toolsRunResponse.map((item) => item.moduleRunResponse).flat();
const flatToolsResponseData = toolsRunResponse.map((item) => item.toolRunResponse).flat();
const functionCall = functionCalls[0];
if (functionCall && !res.closed) {
if (functionCall && !res?.closed) {
// Run the tool, combine its results, and perform another round of AI calls
const assistantToolMsgParams: ChatCompletionAssistantMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
@@ -215,7 +211,7 @@ export const runToolWithFunctionCall = async (
...filterMessages,
assistantToolMsgParams
] as ChatCompletionMessageParam[];
const tokens = countGptMessagesTokens(concatToolMessages, undefined, functions);
const tokens = await countGptMessagesTokens(concatToolMessages, undefined, functions);
const completeMessages = [
...concatToolMessages,
...toolsRunResponse.map((item) => item?.functionCallMsg)
@@ -225,14 +221,14 @@ export const runToolWithFunctionCall = async (
if (stream && detail) {
responseWriteNodeStatus({
res,
name: module.name
name: node.name
});
}
// tool assistant
const toolAssistants = toolsRunResponse
.map((item) => {
const assistantResponses = item.moduleRunResponse.assistantResponses || [];
const assistantResponses = item.toolRunResponse.assistantResponses || [];
return assistantResponses;
})
.flat();
@@ -282,7 +278,7 @@ export const runToolWithFunctionCall = async (
content: answer
};
const completeMessages = filterMessages.concat(gptAssistantResponse);
const tokens = countGptMessagesTokens(completeMessages, undefined, functions);
const tokens = await countGptMessagesTokens(completeMessages, undefined, functions);
// console.log(tokens, 'response token');
// concat tool assistant
@@ -300,12 +296,12 @@ export const runToolWithFunctionCall = async (
async function streamResponse({
res,
detail,
toolModules,
toolNodes,
stream
}: {
res: NextApiResponse;
detail: boolean;
toolModules: ToolModuleItemType[];
toolNodes: ToolNodeItemType[];
stream: StreamChatType;
}) {
const write = responseWriteController({
@@ -324,6 +320,7 @@ async function streamResponse({
}
const responseChoice = part.choices?.[0]?.delta;
if (responseChoice.content) {
const content = responseChoice?.content || '';
textAnswer += content;
@@ -344,9 +341,9 @@ async function streamResponse({
// In a stream response, only one function is returned at a time; if a name is present, a function call has been triggered
if (functionCall?.name) {
functionId = getNanoid();
const toolModule = toolModules.find((module) => module.moduleId === functionCall?.name);
const toolNode = toolNodes.find((item) => item.nodeId === functionCall?.name);
if (toolModule) {
if (toolNode) {
if (functionCall?.arguments === undefined) {
functionCall.arguments = '';
}
@@ -354,8 +351,8 @@ async function streamResponse({
...functionCall,
id: functionId,
name: functionCall.name,
toolName: toolModule.name,
toolAvatar: toolModule.avatar
toolName: toolNode.name,
toolAvatar: toolNode.avatar
});
if (detail) {
@@ -365,8 +362,8 @@ async function streamResponse({
data: JSON.stringify({
tool: {
id: functionId,
toolName: toolModule.name,
toolAvatar: toolModule.avatar,
toolName: toolNode.name,
toolAvatar: toolNode.avatar,
functionName: functionCall.name,
params: functionCall.arguments,
response: ''
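
The re-dispatch pattern above replaces the old startParams mechanism: instead of passing start parameters alongside the flow, the runtime rewrites the node list itself. A condensed sketch with simplified types (the real RuntimeNodeItemType carries more fields):

type RuntimeNode = {
  nodeId: string;
  isEntry?: boolean;
  inputs: { key: string; value?: any }[];
};

// Mark the called tool node as the workflow entry and merge the parsed
// tool-call arguments into its inputs before re-dispatching the flow.
function prepareToolEntry(
  runtimeNodes: RuntimeNode[],
  toolNodeId: string,
  params: Record<string, any>
): RuntimeNode[] {
  return runtimeNodes.map((item) =>
    item.nodeId === toolNodeId
      ? {
          ...item,
          isEntry: true,
          inputs: item.inputs.map((input) => ({
            ...input,
            value: params[input.key] ?? input.value
          }))
        }
      : item
  );
}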

View File

@@ -1,13 +1,13 @@
import { ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type {
DispatchNodeResultType,
RunningModuleItemType
} from '@fastgpt/global/core/module/runtime/type';
RuntimeNodeItemType
} from '@fastgpt/global/core/workflow/runtime/type';
import { ModelTypeEnum, getLLMModel } from '../../../../ai/model';
import { getHistories } from '../../utils';
import { filterToolNodeIdByEdges, getHistories } from '../../utils';
import { runToolWithToolChoice } from './toolChoice';
import { DispatchToolModuleProps, ToolModuleItemType } from './type.d';
import { DispatchToolModuleProps, ToolNodeItemType } from './type.d';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import {
@@ -27,8 +27,9 @@ type Response = DispatchNodeResultType<{}>;
export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
const {
module: { name, outputs },
runtimeModules,
node: { nodeId, name, outputs },
runtimeNodes,
runtimeEdges,
histories,
params: { model, systemPrompt, userChatInput, history = 6 }
} = props;
@@ -38,26 +39,19 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
/* get tool params */
// get tool output targets
const toolOutput = outputs.find((output) => output.key === ModuleOutputKeyEnum.selectedTools);
if (!toolOutput) {
return Promise.reject('No tool output found');
}
const targets = toolOutput.targets;
const toolNodeIds = filterToolNodeIdByEdges({ nodeId, edges: runtimeEdges });
// Gets the module to which the tool is connected
const toolModules = targets
.map((item) => {
const tool = runtimeModules.find((module) => module.moduleId === item.moduleId);
const toolNodes = toolNodeIds
.map((nodeId) => {
const tool = runtimeNodes.find((item) => item.nodeId === nodeId);
return tool;
})
.filter(Boolean)
.map<ToolModuleItemType>((tool) => {
.map<ToolNodeItemType>((tool) => {
const toolParams = tool?.inputs.filter((input) => !!input.toolDescription) || [];
return {
...(tool as RunningModuleItemType),
...(tool as RuntimeNodeItemType),
toolParams
};
});
@@ -85,7 +79,7 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
if (toolModel.toolChoice) {
return runToolWithToolChoice({
...props,
toolModules,
toolNodes,
toolModel,
messages: adaptMessages
});
@@ -93,7 +87,7 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
if (toolModel.functionCall) {
return runToolWithFunctionCall({
...props,
toolModules,
toolNodes,
toolModel,
messages: adaptMessages
});
@@ -110,7 +104,7 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
return runToolWithPromptCall({
...props,
toolModules,
toolNodes,
toolModel,
messages: adaptMessages
});

View File

@@ -13,16 +13,17 @@ import {
responseWriteController,
responseWriteNodeStatus
} from '../../../../../common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { dispatchWorkFlow } from '../../index';
import { DispatchToolModuleProps, RunToolResponse, ToolModuleItemType } from './type.d';
import { DispatchToolModuleProps, RunToolResponse, ToolNodeItemType } from './type.d';
import json5 from 'json5';
import { countGptMessagesTokens } from '@fastgpt/global/common/string/tiktoken';
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/index';
import { getNanoid, replaceVariable } from '@fastgpt/global/common/string/tools';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { updateToolInputValue } from './utils';
type FunctionCallCompletion = {
id: string;
@@ -35,25 +36,16 @@ type FunctionCallCompletion = {
export const runToolWithPromptCall = async (
props: DispatchToolModuleProps & {
messages: ChatCompletionMessageParam[];
toolModules: ToolModuleItemType[];
toolNodes: ToolNodeItemType[];
toolModel: LLMModelItemType;
},
response?: RunToolResponse
): Promise<RunToolResponse> => {
const {
toolModel,
toolModules,
messages,
res,
runtimeModules,
detail = false,
module,
stream
} = props;
const { toolModel, toolNodes, messages, res, runtimeNodes, detail = false, node, stream } = props;
const assistantResponses = response?.assistantResponses || [];
const toolsPrompt = JSON.stringify(
toolModules.map((module) => {
toolNodes.map((item) => {
const properties: Record<
string,
{
@@ -62,7 +54,7 @@ export const runToolWithPromptCall = async (
required?: boolean;
}
> = {};
module.toolParams.forEach((item) => {
item.toolParams.forEach((item) => {
properties[item.key] = {
type: 'string',
description: item.toolDescription || ''
@@ -70,12 +62,12 @@ export const runToolWithPromptCall = async (
});
return {
toolId: module.moduleId,
description: module.intro,
toolId: item.nodeId,
description: item.intro,
parameters: {
type: 'object',
properties,
required: module.toolParams.filter((item) => item.required).map((item) => item.key)
required: item.toolParams.filter((item) => item.required).map((item) => item.key)
}
};
})
@@ -89,7 +81,7 @@ export const runToolWithPromptCall = async (
toolsPrompt
});
const filterMessages = filterGPTMessageByMaxTokens({
const filterMessages = await filterGPTMessageByMaxTokens({
messages,
maxTokens: toolModel.maxContext - 500 // filter token. not response maxToken
});
@@ -114,11 +106,11 @@ export const runToolWithPromptCall = async (
);
const answer = await (async () => {
if (stream) {
if (res && stream) {
const { answer } = await streamResponse({
res,
detail,
toolModules,
toolNodes,
stream: aiResponse
});
@@ -140,7 +132,7 @@ export const runToolWithPromptCall = async (
content: parseAnswerResult
};
const completeMessages = filterMessages.concat(gptAssistantResponse);
const tokens = countGptMessagesTokens(completeMessages, undefined);
const tokens = await countGptMessagesTokens(completeMessages, undefined);
// console.log(tokens, 'response token');
// concat tool assistant
@@ -158,11 +150,11 @@ export const runToolWithPromptCall = async (
const toolsRunResponse = await (async () => {
if (!parseAnswerResult) return Promise.reject('tool run error');
const toolModule = toolModules.find((module) => module.moduleId === parseAnswerResult.name);
if (!toolModule) return Promise.reject('tool not found');
const toolNode = toolNodes.find((item) => item.nodeId === parseAnswerResult.name);
if (!toolNode) return Promise.reject('tool not found');
parseAnswerResult.toolName = toolModule.name;
parseAnswerResult.toolAvatar = toolModule.avatar;
parseAnswerResult.toolName = toolNode.name;
parseAnswerResult.toolAvatar = toolNode.avatar;
// run tool flow
const startParams = (() => {
@@ -181,8 +173,8 @@ export const runToolWithPromptCall = async (
data: JSON.stringify({
tool: {
id: parseAnswerResult.id,
toolName: toolModule.name,
toolAvatar: toolModule.avatar,
toolName: toolNode.name,
toolAvatar: toolNode.avatar,
functionName: parseAnswerResult.name,
params: parseAnswerResult.arguments,
response: ''
@@ -193,11 +185,15 @@ export const runToolWithPromptCall = async (
const moduleRunResponse = await dispatchWorkFlow({
...props,
runtimeModules: runtimeModules.map((module) => ({
...module,
isEntry: module.moduleId === toolModule.moduleId
})),
startParams
runtimeNodes: runtimeNodes.map((item) =>
item.nodeId === toolNode.nodeId
? {
...item,
isEntry: true,
inputs: updateToolInputValue({ params: startParams, inputs: item.inputs })
}
: item
)
});
const stringToolResponse = (() => {
@@ -233,7 +229,7 @@ export const runToolWithPromptCall = async (
if (stream && detail) {
responseWriteNodeStatus({
res,
name: module.name
name: node.name
});
}
@@ -246,7 +242,7 @@ export const runToolWithPromptCall = async (
...filterMessages,
assistantToolMsgParams
] as ChatCompletionMessageParam[];
const tokens = countGptMessagesTokens(concatToolMessages, undefined);
const tokens = await countGptMessagesTokens(concatToolMessages, undefined);
const completeMessages: ChatCompletionMessageParam[] = [
...concatToolMessages,
{
@@ -308,7 +304,7 @@ async function streamResponse({
}: {
res: NextApiResponse;
detail: boolean;
toolModules: ToolModuleItemType[];
toolNodes: ToolNodeItemType[];
stream: StreamChatType;
}) {
const write = responseWriteController({
@@ -326,6 +322,8 @@ async function streamResponse({
}
const responseChoice = part.choices?.[0]?.delta;
// console.log(responseChoice, '---===');
if (responseChoice.content) {
const content = responseChoice?.content || '';
textAnswer += content;
@@ -360,7 +358,6 @@ async function streamResponse({
if (!textAnswer) {
return Promise.reject('LLM api response empty');
}
// console.log(textAnswer, '---===');
return { answer: textAnswer.trim() };
}
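
For models with neither tool choice nor function calling, runToolWithPromptCall serializes the tool schemas straight into the prompt. A self-contained sketch of the toolsPrompt construction shown above, with ToolNodeItemType reduced to the fields it actually uses:

type ToolNode = {
  nodeId: string;
  intro: string;
  toolParams: { key: string; toolDescription?: string; required?: boolean }[];
};

// Serialize each connected tool node into a JSON schema the model is asked
// to pick from by toolId.
function buildToolsPrompt(toolNodes: ToolNode[]): string {
  return JSON.stringify(
    toolNodes.map((item) => {
      const properties: Record<string, { type: string; description: string }> = {};
      item.toolParams.forEach((p) => {
        properties[p.key] = { type: 'string', description: p.toolDescription || '' };
      });
      return {
        toolId: item.nodeId,
        description: item.intro,
        parameters: {
          type: 'object',
          properties,
          required: item.toolParams.filter((p) => p.required).map((p) => p.key)
        }
      };
    })
  );
}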

View File

@@ -1,6 +1,6 @@
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type/index.d';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
export type AnswerProps = ModuleDispatchProps<{}>;
export type AnswerResponse = DispatchNodeResultType<{}>;

View File

@@ -17,19 +17,20 @@ import {
responseWriteController,
responseWriteNodeStatus
} from '../../../../../common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { dispatchWorkFlow } from '../../index';
import { DispatchToolModuleProps, RunToolResponse, ToolModuleItemType } from './type.d';
import { DispatchToolModuleProps, RunToolResponse, ToolNodeItemType } from './type.d';
import json5 from 'json5';
import { DispatchFlowResponse } from '../../type';
import { countGptMessagesTokens } from '@fastgpt/global/common/string/tiktoken';
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/index';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { updateToolInputValue } from './utils';
type ToolRunResponseType = {
moduleRunResponse: DispatchFlowResponse;
toolRunResponse: DispatchFlowResponse;
toolMsgParams: ChatCompletionToolMessageParam;
}[];
@@ -43,24 +44,15 @@ type ToolRunResponseType = {
export const runToolWithToolChoice = async (
props: DispatchToolModuleProps & {
messages: ChatCompletionMessageParam[];
toolModules: ToolModuleItemType[];
toolNodes: ToolNodeItemType[];
toolModel: LLMModelItemType;
},
response?: RunToolResponse
): Promise<RunToolResponse> => {
const {
toolModel,
toolModules,
messages,
res,
runtimeModules,
detail = false,
module,
stream
} = props;
const { toolModel, toolNodes, messages, res, runtimeNodes, detail = false, node, stream } = props;
const assistantResponses = response?.assistantResponses || [];
const tools: ChatCompletionTool[] = toolModules.map((module) => {
const tools: ChatCompletionTool[] = toolNodes.map((item) => {
const properties: Record<
string,
{
@@ -69,7 +61,7 @@ export const runToolWithToolChoice = async (
required?: boolean;
}
> = {};
module.toolParams.forEach((item) => {
item.toolParams.forEach((item) => {
properties[item.key] = {
type: 'string',
description: item.toolDescription || ''
@@ -79,18 +71,18 @@ export const runToolWithToolChoice = async (
return {
type: 'function',
function: {
name: module.moduleId,
description: module.intro,
name: item.nodeId,
description: item.intro,
parameters: {
type: 'object',
properties,
required: module.toolParams.filter((item) => item.required).map((item) => item.key)
required: item.toolParams.filter((item) => item.required).map((item) => item.key)
}
}
};
});
const filterMessages = filterGPTMessageByMaxTokens({
const filterMessages = await filterGPTMessageByMaxTokens({
messages,
maxTokens: toolModel.maxContext - 300 // filter token. not response maxToken
});
@@ -117,11 +109,11 @@ export const runToolWithToolChoice = async (
);
const { answer, toolCalls } = await (async () => {
if (stream) {
if (res && stream) {
return streamResponse({
res,
detail,
toolModules,
toolNodes,
stream: aiResponse
});
} else {
@@ -130,11 +122,11 @@ export const runToolWithToolChoice = async (
// Attach the tool's name and avatar
const toolCalls = calls.map((tool) => {
const toolModule = toolModules.find((module) => module.moduleId === tool.function?.name);
const toolNode = toolNodes.find((item) => item.nodeId === tool.function?.name);
return {
...tool,
toolName: toolModule?.name || '',
toolAvatar: toolModule?.avatar || ''
toolName: toolNode?.name || '',
toolAvatar: toolNode?.avatar || ''
};
});
@@ -145,13 +137,13 @@ export const runToolWithToolChoice = async (
}
})();
// Run the selected tool.
// Run the selected tool by LLM.
const toolsRunResponse = (
await Promise.all(
toolCalls.map(async (tool) => {
const toolModule = toolModules.find((module) => module.moduleId === tool.function?.name);
const toolNode = toolNodes.find((item) => item.nodeId === tool.function?.name);
if (!toolModule) return;
if (!toolNode) return;
const startParams = (() => {
try {
@@ -161,21 +153,25 @@ export const runToolWithToolChoice = async (
}
})();
const moduleRunResponse = await dispatchWorkFlow({
const toolRunResponse = await dispatchWorkFlow({
...props,
runtimeModules: runtimeModules.map((module) => ({
...module,
isEntry: module.moduleId === toolModule.moduleId
})),
startParams
runtimeNodes: runtimeNodes.map((item) =>
item.nodeId === toolNode.nodeId
? {
...item,
isEntry: true,
inputs: updateToolInputValue({ params: startParams, inputs: item.inputs })
}
: item
)
});
const stringToolResponse = (() => {
if (typeof moduleRunResponse.toolResponses === 'object') {
return JSON.stringify(moduleRunResponse.toolResponses, null, 2);
if (typeof toolRunResponse.toolResponses === 'object') {
return JSON.stringify(toolRunResponse.toolResponses, null, 2);
}
return moduleRunResponse.toolResponses ? String(moduleRunResponse.toolResponses) : 'none';
return toolRunResponse.toolResponses ? String(toolRunResponse.toolResponses) : 'none';
})();
const toolMsgParams: ChatCompletionToolMessageParam = {
@@ -202,15 +198,15 @@ export const runToolWithToolChoice = async (
}
return {
moduleRunResponse,
toolRunResponse,
toolMsgParams
};
})
)
).filter(Boolean) as ToolRunResponseType;
const flatToolsResponseData = toolsRunResponse.map((item) => item.moduleRunResponse).flat();
if (toolCalls.length > 0 && !res.closed) {
const flatToolsResponseData = toolsRunResponse.map((item) => item.toolRunResponse).flat();
if (toolCalls.length > 0 && !res?.closed) {
// Run the tool, combine its results, and perform another round of AI calls
const assistantToolMsgParams: ChatCompletionAssistantToolParam = {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
@@ -220,7 +216,7 @@ export const runToolWithToolChoice = async (
...filterMessages,
assistantToolMsgParams
] as ChatCompletionMessageParam[];
const tokens = countGptMessagesTokens(concatToolMessages, tools);
const tokens = await countGptMessagesTokens(concatToolMessages, tools);
const completeMessages = [
...concatToolMessages,
...toolsRunResponse.map((item) => item?.toolMsgParams)
@@ -231,14 +227,14 @@ export const runToolWithToolChoice = async (
if (stream && detail) {
responseWriteNodeStatus({
res,
name: module.name
name: node.name
});
}
// tool assistant
const toolAssistants = toolsRunResponse
.map((item) => {
const assistantResponses = item.moduleRunResponse.assistantResponses || [];
const assistantResponses = item.toolRunResponse.assistantResponses || [];
return assistantResponses;
})
.flat();
@@ -289,7 +285,7 @@ export const runToolWithToolChoice = async (
content: answer
};
const completeMessages = filterMessages.concat(gptAssistantResponse);
const tokens = countGptMessagesTokens(completeMessages, tools);
const tokens = await countGptMessagesTokens(completeMessages, tools);
// console.log(tokens, 'response token');
// concat tool assistant
@@ -307,12 +303,12 @@ export const runToolWithToolChoice = async (
async function streamResponse({
res,
detail,
toolModules,
toolNodes,
stream
}: {
res: NextApiResponse;
detail: boolean;
toolModules: ToolModuleItemType[];
toolNodes: ToolNodeItemType[];
stream: StreamChatType;
}) {
const write = responseWriteController({
@@ -347,18 +343,16 @@ async function streamResponse({
// In a stream response, only one tool is returned at a time; if an id is present, a tool is being executed
if (toolCall.id) {
const toolModule = toolModules.find(
(module) => module.moduleId === toolCall.function?.name
);
const toolNode = toolNodes.find((item) => item.nodeId === toolCall.function?.name);
if (toolModule) {
if (toolNode) {
if (toolCall.function?.arguments === undefined) {
toolCall.function.arguments = '';
}
toolCalls.push({
...toolCall,
toolName: toolModule.name,
toolAvatar: toolModule.avatar
toolName: toolNode.name,
toolAvatar: toolNode.avatar
});
if (detail) {
@@ -368,8 +362,8 @@ async function streamResponse({
data: JSON.stringify({
tool: {
id: toolCall.id,
toolName: toolModule.name,
toolAvatar: toolModule.avatar,
toolName: toolNode.name,
toolAvatar: toolNode.avatar,
functionName: toolCall.function.name,
params: toolCall.function.arguments,
response: ''
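
The streaming branch above assembles tool calls incrementally. A simplified accumulator sketch (the real OpenAI stream also keys deltas by index, omitted here for brevity):

type StreamedToolCall = { id: string; name: string; arguments: string };

// The first delta for a call carries id and function name; later deltas
// append argument fragments to the most recent call.
function accumulateToolCall(
  calls: StreamedToolCall[],
  delta: { id?: string; function?: { name?: string; arguments?: string } }
): StreamedToolCall[] {
  if (delta.id && delta.function?.name) {
    calls.push({
      id: delta.id,
      name: delta.function.name,
      arguments: delta.function.arguments ?? ''
    });
  } else if (delta.function?.arguments && calls.length > 0) {
    calls[calls.length - 1].arguments += delta.function.arguments;
  }
  return calls;
}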

View File

@@ -1,20 +1,19 @@
import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { FlowNodeInputItemType } from '@fastgpt/global/core/module/node/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import type {
ModuleDispatchProps,
DispatchNodeResponseType
} from '@fastgpt/global/core/module/type.d';
import type { RunningModuleItemType } from '@fastgpt/global/core/module/runtime/type';
} from '@fastgpt/global/core/workflow/type/index.d';
import type { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import type { DispatchFlowResponse } from '../../type.d';
import { AIChatItemValueItemType, ChatItemValueItemType } from '@fastgpt/global/core/chat/type';
export type DispatchToolModuleProps = ModuleDispatchProps<{
[ModuleInputKeyEnum.history]?: ChatItemType[];
[ModuleInputKeyEnum.aiModel]: string;
[ModuleInputKeyEnum.aiSystemPrompt]: string;
[ModuleInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.history]?: ChatItemType[];
[NodeInputKeyEnum.aiModel]: string;
[NodeInputKeyEnum.aiSystemPrompt]: string;
[NodeInputKeyEnum.userChatInput]: string;
}>;
export type RunToolResponse = {
@@ -23,6 +22,6 @@ export type RunToolResponse = {
completeMessages?: ChatCompletionMessageParam[];
assistantResponses?: AIChatItemValueItemType[];
};
export type ToolModuleItemType = RunningModuleItemType & {
toolParams: RunningModuleItemType['inputs'];
export type ToolNodeItemType = RuntimeNodeItemType & {
toolParams: RuntimeNodeItemType['inputs'];
};

View File

@@ -0,0 +1,14 @@
import { FlowNodeInputItemType } from '@fastgpt/global/core/workflow/type/io';
export const updateToolInputValue = ({
params,
inputs
}: {
params: Record<string, any>;
inputs: FlowNodeInputItemType[];
}) => {
return inputs.map((input) => ({
...input,
value: params[input.key] ?? input.value
}));
};
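
Example usage (illustrative values): parsed tool-call arguments override matching inputs, and inputs with no corresponding argument keep their existing values.

const inputs = [
  { key: 'city' },
  { key: 'unit', value: 'celsius' }
] as unknown as FlowNodeInputItemType[]; // cast for brevity; real inputs carry more fields

const merged = updateToolInputValue({ params: { city: 'Shanghai' }, inputs });
// merged[0].value === 'Shanghai', merged[1].value === 'celsius'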

View File

@@ -6,8 +6,8 @@ import {
} from '../../../chat/utils';
import type { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type.d';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { getAIApi } from '../../../ai/config';
import type {
ChatCompletion,
@@ -18,12 +18,11 @@ import { formatModelChars2Points } from '../../../../support/wallet/usage/utils'
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { postTextCensor } from '../../../../common/api/requestPlusApi';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import type { ModuleItemType } from '@fastgpt/global/core/module/type.d';
import type { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
import type { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import {
countGptMessagesTokens,
countMessagesTokens
} from '@fastgpt/global/common/string/tiktoken';
} from '../../../../common/string/tiktoken/index';
import {
chats2GPTMessages,
getSystemPrompt,
@@ -34,28 +33,28 @@ import {
Prompt_QuotePromptList,
Prompt_QuoteTemplateList
} from '@fastgpt/global/core/ai/prompt/AIChat';
import type { AIChatModuleProps } from '@fastgpt/global/core/module/node/type.d';
import type { AIChatNodeProps } from '@fastgpt/global/core/workflow/runtime/type.d';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type/index.d';
import { responseWrite, responseWriteController } from '../../../../common/response';
import { getLLMModel, ModelTypeEnum } from '../../../ai/model';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { getHistories } from '../utils';
import { filterSearchResultsByMaxChars } from '@fastgpt/global/core/dataset/search/utils';
import { filterSearchResultsByMaxChars } from '../../utils';
import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
export type ChatProps = ModuleDispatchProps<
AIChatModuleProps & {
[ModuleInputKeyEnum.userChatInput]: string;
[ModuleInputKeyEnum.history]?: ChatItemType[] | number;
[ModuleInputKeyEnum.aiChatDatasetQuote]?: SearchDataResponseItemType[];
AIChatNodeProps & {
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.history]?: ChatItemType[] | number;
[NodeInputKeyEnum.aiChatDatasetQuote]?: SearchDataResponseItemType[];
}
>;
export type ChatResponse = DispatchNodeResultType<{
[ModuleOutputKeyEnum.answerText]: string;
[ModuleOutputKeyEnum.history]: ChatItemType[];
[NodeOutputKeyEnum.answerText]: string;
[NodeOutputKeyEnum.history]: ChatItemType[];
}>;
/* request openai chat */
@@ -66,7 +65,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
detail = false,
user,
histories,
module: { name, outputs },
node: { name },
inputFiles = [],
params: {
model,
@@ -95,7 +94,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
return Promise.reject('The chat model is undefined, you need to select a chat model.');
}
const { quoteText } = filterQuote({
const { quoteText } = await filterQuote({
quoteQA,
model: modelConstantsData,
quoteTemplate
@@ -111,7 +110,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
});
}
const { filterMessages } = getChatMessages({
const { filterMessages } = await getChatMessages({
model: modelConstantsData,
histories: chatHistories,
quoteQA,
@@ -182,7 +181,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
);
const { answerText } = await (async () => {
if (stream) {
if (res && stream) {
// sse response
const { answer } = await streamResponse({
res,
@@ -190,8 +189,6 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
stream: response
});
targetResponse({ res, detail, outputs });
return {
answerText: answer
};
@@ -211,7 +208,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
});
const chatCompleteMessages = GPTMessages2Chats(completeMessages);
const tokens = countMessagesTokens(chatCompleteMessages);
const tokens = await countMessagesTokens(chatCompleteMessages);
const { totalPoints, modelName } = formatModelChars2Points({
model,
tokens,
@@ -242,7 +239,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
};
};
function filterQuote({
async function filterQuote({
quoteQA = [],
model,
quoteTemplate
@@ -262,7 +259,7 @@ function filterQuote({
}
// slice filterSearch
const filterQuoteQA = filterSearchResultsByMaxChars(quoteQA, model.quoteMaxToken);
const filterQuoteQA = await filterSearchResultsByMaxChars(quoteQA, model.quoteMaxToken);
const quoteText =
filterQuoteQA.length > 0
@@ -273,7 +270,7 @@ function filterQuote({
quoteText
};
}
function getChatMessages({
async function getChatMessages({
quotePrompt,
quoteText,
quoteQA,
@@ -313,7 +310,7 @@ function getChatMessages({
];
const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
const filterMessages = filterGPTMessageByMaxTokens({
const filterMessages = await filterGPTMessageByMaxTokens({
messages: adaptMessages,
maxTokens: model.maxContext - 300 // filter token. not response maxToken
});
@@ -322,7 +319,7 @@ function getChatMessages({
filterMessages
};
}
function getMaxTokens({
async function getMaxTokens({
maxToken,
model,
filterMessages = []
@@ -335,7 +332,7 @@ function getMaxTokens({
const tokensLimit = model.maxContext;
/* count response max token */
const promptsToken = countGptMessagesTokens(filterMessages);
const promptsToken = await countGptMessagesTokens(filterMessages);
maxToken = promptsToken + maxToken > tokensLimit ? tokensLimit - promptsToken : maxToken;
if (maxToken <= 0) {
@@ -346,28 +343,6 @@ function getMaxTokens({
};
}
function targetResponse({
res,
outputs,
detail
}: {
res: NextApiResponse;
outputs: ModuleItemType['outputs'];
detail: boolean;
}) {
const targets =
outputs.find((output) => output.key === ModuleOutputKeyEnum.answerText)?.targets || [];
if (targets.length === 0) return;
responseWrite({
res,
event: detail ? SseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({
text: '\n'
})
});
}
async function streamResponse({
res,
detail,
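
Note that every countMessagesTokens / countGptMessagesTokens call in this file gained an await: the counters moved from @fastgpt/global into a service-side module and, per the 'perf: tiktoken worker' commits, off the main thread. A minimal sketch of the shape such a worker-backed counter could take (not the repository's actual implementation; tiktokenWorker.js is hypothetical):

import { Worker } from 'worker_threads';

// One long-lived worker owns the tiktoken encoder; callers get Promises.
const worker = new Worker('./tiktokenWorker.js'); // hypothetical worker file

let seq = 0;
const pending = new Map<number, (tokens: number) => void>();

worker.on('message', ({ id, tokens }: { id: number; tokens: number }) => {
  pending.get(id)?.(tokens);
  pending.delete(id);
});

export function countMessagesTokens(messages: unknown[]): Promise<number> {
  const id = seq++;
  return new Promise((resolve) => {
    pending.set(id, resolve);
    worker.postMessage({ id, messages });
  });
}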

View File

@@ -0,0 +1,38 @@
import { addLog } from '../../../../common/system/log';
const ivm = require('isolated-vm');
export const runJsCode = ({
code,
variables
}: {
code: string;
variables: Record<string, any>;
}) => {
const isolate = new ivm.Isolate({ memoryLimit: 16 });
const context = isolate.createContextSync();
const jail = context.global;
return new Promise((resolve, reject) => {
// Result callback: the sandboxed script returns its output through responseData
jail.setSync('responseData', function (args: any): any {
if (typeof args === 'object') {
resolve(args);
} else {
reject('Not a valid response');
}
});
// Add global variables
jail.setSync('variables', new ivm.ExternalCopy(variables).copyInto());
try {
const scriptCode = `
${code}
responseData(main(variables))`;
context.evalSync(scriptCode, { timeout: 2000 });
} catch (err) {
addLog.error('Error during script execution:', err);
reject(err);
}
});
};
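
Example usage (a sketch): the submitted code must define main(variables). The wrapper appends responseData(main(variables)), so the promise resolves with whatever object main returns and rejects on non-object results or script errors.

const result = await runJsCode({
  code: `function main({ a, b }) { return { sum: a + b }; }`,
  variables: { a: 1, b: 2 }
});
// result -> { sum: 3 }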

View File

@@ -1,16 +1,16 @@
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type/index.d';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { datasetSearchResultConcat } from '@fastgpt/global/core/dataset/search/utils';
import { filterSearchResultsByMaxChars } from '@fastgpt/global/core/dataset/search/utils';
import { filterSearchResultsByMaxChars } from '../../utils';
type DatasetConcatProps = ModuleDispatchProps<
{
[ModuleInputKeyEnum.datasetMaxTokens]: number;
[NodeInputKeyEnum.datasetMaxTokens]: number;
} & { [key: string]: SearchDataResponseItemType[] }
>;
type DatasetConcatResponse = {
[ModuleOutputKeyEnum.datasetQuoteQA]: SearchDataResponseItemType[];
[NodeOutputKeyEnum.datasetQuoteQA]: SearchDataResponseItemType[];
};
export async function dispatchDatasetConcat(
@@ -30,6 +30,6 @@ export async function dispatchDatasetConcat(
);
return {
[ModuleOutputKeyEnum.datasetQuoteQA]: filterSearchResultsByMaxChars(rrfConcatResults, limit)
[NodeOutputKeyEnum.datasetQuoteQA]: await filterSearchResultsByMaxChars(rrfConcatResults, limit)
};
}
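
The rrfConcatResults name above points to reciprocal rank fusion as the merge strategy behind datasetSearchResultConcat. An illustrative sketch of plain RRF (the library's actual weighting and tie-breaking may differ; k = 60 is the conventional constant):

// Score each item by the sum of 1 / (k + rank) across all result lists,
// then return items ordered by fused score.
function rrfMerge<T extends { id: string }>(lists: T[][], k = 60): T[] {
  const scores = new Map<string, { item: T; score: number }>();
  lists.forEach((list) =>
    list.forEach((item, rank) => {
      const prev = scores.get(item.id);
      scores.set(item.id, { item, score: (prev?.score ?? 0) + 1 / (k + rank + 1) });
    })
  );
  return [...scores.values()]
    .sort((a, b) => b.score - a.score)
    .map((entry) => entry.item);
}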

View File

@@ -1,15 +1,15 @@
import {
DispatchNodeResponseType,
DispatchNodeResultType
} from '@fastgpt/global/core/module/runtime/type.d';
} from '@fastgpt/global/core/workflow/runtime/type.d';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import type { SelectedDatasetType } from '@fastgpt/global/core/module/api.d';
import type { SelectedDatasetType } from '@fastgpt/global/core/workflow/api.d';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type/index.d';
import { ModelTypeEnum, getLLMModel, getVectorModel } from '../../../ai/model';
import { searchDatasetData } from '../../../dataset/search/controller';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { DatasetSearchModeEnum } from '@fastgpt/global/core/dataset/constants';
import { getHistories } from '../utils';
import { datasetSearchQueryExtension } from '../../../dataset/search/utils';
@@ -17,20 +17,18 @@ import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { checkTeamReRankPermission } from '../../../../support/permission/teamLimit';
type DatasetSearchProps = ModuleDispatchProps<{
[ModuleInputKeyEnum.datasetSelectList]: SelectedDatasetType;
[ModuleInputKeyEnum.datasetSimilarity]: number;
[ModuleInputKeyEnum.datasetMaxTokens]: number;
[ModuleInputKeyEnum.datasetSearchMode]: `${DatasetSearchModeEnum}`;
[ModuleInputKeyEnum.userChatInput]: string;
[ModuleInputKeyEnum.datasetSearchUsingReRank]: boolean;
[ModuleInputKeyEnum.datasetSearchUsingExtensionQuery]: boolean;
[ModuleInputKeyEnum.datasetSearchExtensionModel]: string;
[ModuleInputKeyEnum.datasetSearchExtensionBg]: string;
[NodeInputKeyEnum.datasetSelectList]: SelectedDatasetType;
[NodeInputKeyEnum.datasetSimilarity]: number;
[NodeInputKeyEnum.datasetMaxTokens]: number;
[NodeInputKeyEnum.datasetSearchMode]: `${DatasetSearchModeEnum}`;
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.datasetSearchUsingReRank]: boolean;
[NodeInputKeyEnum.datasetSearchUsingExtensionQuery]: boolean;
[NodeInputKeyEnum.datasetSearchExtensionModel]: string;
[NodeInputKeyEnum.datasetSearchExtensionBg]: string;
}>;
export type DatasetSearchResponse = DispatchNodeResultType<{
[ModuleOutputKeyEnum.datasetIsEmpty]?: boolean;
[ModuleOutputKeyEnum.datasetUnEmpty]?: boolean;
[ModuleOutputKeyEnum.datasetQuoteQA]: SearchDataResponseItemType[];
[NodeOutputKeyEnum.datasetQuoteQA]: SearchDataResponseItemType[];
}>;
export async function dispatchDatasetSearch(
@@ -39,7 +37,7 @@ export async function dispatchDatasetSearch(
const {
teamId,
histories,
module,
node,
params: {
datasets = [],
similarity,
@@ -67,10 +65,10 @@ export async function dispatchDatasetSearch(
}
// query extension
const extensionModel =
datasetSearchUsingExtensionQuery && datasetSearchExtensionModel
? getLLMModel(datasetSearchExtensionModel)
: undefined;
const extensionModel = datasetSearchUsingExtensionQuery
? getLLMModel(datasetSearchExtensionModel)
: undefined;
const { concatQueries, rewriteQuery, aiExtensionResult } = await datasetSearchQueryExtension({
query: userChatInput,
extensionModel,
@@ -122,7 +120,7 @@ export async function dispatchDatasetSearch(
const nodeDispatchUsages: ChatNodeUsageType[] = [
{
totalPoints,
moduleName: module.name,
moduleName: node.name,
model: modelName,
tokens
}
@@ -151,8 +149,6 @@ export async function dispatchDatasetSearch(
}
return {
isEmpty: searchRes.length === 0 ? true : undefined,
unEmpty: searchRes.length > 0 ? true : undefined,
quoteQA: searchRes,
[DispatchNodeResponseKeyEnum.nodeResponse]: responseData,
nodeDispatchUsages,

View File

@@ -1,54 +1,57 @@
import { NextApiResponse } from 'next';
import { ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import type { ChatDispatchProps } from '@fastgpt/global/core/module/type.d';
import type { RunningModuleItemType } from '@fastgpt/global/core/module/runtime/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { NodeInputKeyEnum, WorkflowIOValueTypeEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import type { ChatDispatchProps } from '@fastgpt/global/core/workflow/type/index.d';
import type { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type/index.d';
import type {
AIChatItemValueItemType,
ChatHistoryItemResType,
ToolRunResponseItemType
} from '@fastgpt/global/core/chat/type.d';
import { FlowNodeInputTypeEnum, FlowNodeTypeEnum } from '@fastgpt/global/core/module/node/constant';
import { ModuleItemType } from '@fastgpt/global/core/module/type';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { responseWriteNodeStatus } from '../../../common/response';
import { getSystemTime } from '@fastgpt/global/common/time/timezone';
import { dispatchHistory } from './init/history';
import { dispatchChatInput } from './init/userChatInput';
import { dispatchWorkflowStart } from './init/workflowStart';
import { dispatchChatCompletion } from './chat/oneapi';
import { dispatchDatasetSearch } from './dataset/search';
import { dispatchDatasetConcat } from './dataset/concat';
import { dispatchAnswer } from './tools/answer';
import { dispatchClassifyQuestion } from './agent/classifyQuestion';
import { dispatchContentExtract } from './agent/extract';
import { dispatchHttpRequest } from './tools/http';
import { dispatchHttp468Request } from './tools/http468';
import { dispatchAppRequest } from './tools/runApp';
import { dispatchQueryExtension } from './tools/queryExternsion';
import { dispatchRunPlugin } from './plugin/run';
import { dispatchPluginInput } from './plugin/runInput';
import { dispatchPluginOutput } from './plugin/runOutput';
import { checkTheModuleConnectedByTool, valueTypeFormat } from './utils';
import { valueTypeFormat } from './utils';
import {
filterWorkflowEdges,
checkNodeRunStatus
} from '@fastgpt/global/core/workflow/runtime/utils';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { dispatchRunTools } from './agent/runTool/index';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import { DispatchFlowResponse } from './type';
import { dispatchStopToolCall } from './agent/runTool/stopTool';
import { dispatchLafRequest } from './tools/runLaf';
import { dispatchIfElse } from './tools/runIfElse';
import { RuntimeEdgeItemType } from '@fastgpt/global/core/workflow/type/edge';
import { getReferenceVariableValue } from '@fastgpt/global/core/workflow/runtime/utils';
import { dispatchSystemConfig } from './init/systemConfiig';
const callbackMap: Record<`${FlowNodeTypeEnum}`, Function> = {
[FlowNodeTypeEnum.historyNode]: dispatchHistory,
[FlowNodeTypeEnum.questionInput]: dispatchChatInput,
[FlowNodeTypeEnum.workflowStart]: dispatchWorkflowStart,
[FlowNodeTypeEnum.answerNode]: dispatchAnswer,
[FlowNodeTypeEnum.chatNode]: dispatchChatCompletion,
[FlowNodeTypeEnum.datasetSearchNode]: dispatchDatasetSearch,
[FlowNodeTypeEnum.datasetConcatNode]: dispatchDatasetConcat,
[FlowNodeTypeEnum.classifyQuestion]: dispatchClassifyQuestion,
[FlowNodeTypeEnum.contentExtract]: dispatchContentExtract,
[FlowNodeTypeEnum.httpRequest]: dispatchHttpRequest,
[FlowNodeTypeEnum.httpRequest468]: dispatchHttp468Request,
[FlowNodeTypeEnum.runApp]: dispatchAppRequest,
[FlowNodeTypeEnum.pluginModule]: dispatchRunPlugin,
@@ -58,17 +61,19 @@ const callbackMap: Record<`${FlowNodeTypeEnum}`, Function> = {
[FlowNodeTypeEnum.tools]: dispatchRunTools,
[FlowNodeTypeEnum.stopTool]: dispatchStopToolCall,
[FlowNodeTypeEnum.lafModule]: dispatchLafRequest,
[FlowNodeTypeEnum.ifElseNode]: dispatchIfElse,
// none
[FlowNodeTypeEnum.userGuide]: () => Promise.resolve()
[FlowNodeTypeEnum.systemConfig]: dispatchSystemConfig,
[FlowNodeTypeEnum.emptyNode]: () => Promise.resolve(),
[FlowNodeTypeEnum.globalVariable]: () => Promise.resolve()
};
/* running */
export async function dispatchWorkFlow({
res,
modules = [],
runtimeModules,
startParams = {},
runtimeNodes = [],
runtimeEdges = [],
histories = [],
variables = {},
user,
@@ -76,12 +81,11 @@ export async function dispatchWorkFlow({
detail = false,
...props
}: ChatDispatchProps & {
modules?: ModuleItemType[]; // app modules
runtimeModules?: RunningModuleItemType[];
startParams?: Record<string, any>; // entry module params
runtimeNodes: RuntimeNodeItemType[];
runtimeEdges: RuntimeEdgeItemType[];
}): Promise<DispatchFlowResponse> {
// set sse response headers
if (stream) {
if (stream && res) {
res.setHeader('Content-Type', 'text/event-stream;charset=utf-8');
res.setHeader('Access-Control-Allow-Origin', '*');
res.setHeader('X-Accel-Buffering', 'no');
@@ -92,17 +96,17 @@ export async function dispatchWorkFlow({
...getSystemVariable({ timezone: user.timezone }),
...variables
};
const runningModules = runtimeModules ? runtimeModules : loadModules(modules, variables);
let chatResponses: ChatHistoryItemResType[] = []; // response request and save to database
let chatAssistantResponse: AIChatItemValueItemType[] = []; // The value will be returned to the user
let chatNodeUsages: ChatNodeUsageType[] = [];
let toolRunResponse: ToolRunResponseItemType;
let runningTime = Date.now();
let debugNextStepRunNodes: RuntimeNodeItemType[] = [];
/* Store special response field */
function pushStore(
{ inputs = [] }: RunningModuleItemType,
{ inputs = [] }: RuntimeNodeItemType,
{
answerText = '',
responseData,
@@ -110,7 +114,7 @@ export async function dispatchWorkFlow({
toolResponses,
assistantResponses
}: {
[ModuleOutputKeyEnum.answerText]?: string;
[NodeOutputKeyEnum.answerText]?: string;
[DispatchNodeResponseKeyEnum.nodeResponse]?: ChatHistoryItemResType;
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]?: ChatNodeUsageType[];
[DispatchNodeResponseKeyEnum.toolResponses]?: ToolRunResponseItemType;
@@ -143,7 +147,7 @@ export async function dispatchWorkFlow({
// save assistant text response
if (answerText) {
const isResponseAnswerText =
inputs.find((item) => item.key === ModuleInputKeyEnum.aiChatIsResponseText)?.value ?? true;
inputs.find((item) => item.key === NodeInputKeyEnum.aiChatIsResponseText)?.value ?? true;
if (isResponseAnswerText) {
chatAssistantResponse.push({
type: ChatItemValueTypeEnum.text,
@@ -156,85 +160,112 @@ export async function dispatchWorkFlow({
runningTime = time;
}
/* Inject data into module input */
function moduleInput(module: RunningModuleItemType, data: Record<string, any> = {}) {
const updateInputValue = (key: string, value: any) => {
const index = module.inputs.findIndex((item: any) => item.key === key);
if (index === -1) return;
module.inputs[index].value = value;
};
Object.entries(data).map(([key, val]: any) => {
updateInputValue(key, val);
});
return;
}
/* Pass the output of the module to the next stage */
function moduleOutput(
module: RunningModuleItemType,
function nodeOutput(
node: RuntimeNodeItemType,
result: Record<string, any> = {}
): Promise<any> {
pushStore(module, result);
): RuntimeNodeItemType[] {
pushStore(node, result);
const nextRunModules: RunningModuleItemType[] = [];
// Assign the output value to the next module
module.outputs.map((outputItem) => {
// Assign the output value to the next node
node.outputs.forEach((outputItem) => {
if (result[outputItem.key] === undefined) return;
/* update output value */
outputItem.value = result[outputItem.key];
/* update target */
outputItem.targets.map((target: any) => {
// find module
const targetModule = runningModules.find((item) => item.moduleId === target.moduleId);
if (!targetModule) return;
// push to running queue
nextRunModules.push(targetModule);
// update input
moduleInput(targetModule, { [target.key]: outputItem.value });
});
});
// Ensure the uniqueness of running modules
const set = new Set<string>();
const filterModules = nextRunModules.filter((module) => {
if (set.has(module.moduleId)) return false;
set.add(module.moduleId);
return true;
});
return checkModulesCanRun(filterModules);
}
function checkModulesCanRun(modules: RunningModuleItemType[] = []) {
return Promise.all(
modules.map((module) => {
if (!module.inputs.find((item: any) => item.value === undefined)) {
// remove switch
moduleInput(module, { [ModuleInputKeyEnum.switch]: undefined });
return moduleRun(module);
}
})
// Get next source edges and update status
const skipHandleId = (result[DispatchNodeResponseKeyEnum.skipHandleId] || []) as string[];
const targetEdges = filterWorkflowEdges(runtimeEdges).filter(
(item) => item.source === node.nodeId
);
}
async function moduleRun(module: RunningModuleItemType): Promise<any> {
if (res.closed || props.maxRunTimes <= 0) return Promise.resolve();
if (stream && detail && module.showStatus) {
// update edge status
targetEdges.forEach((edge) => {
if (skipHandleId.includes(edge.sourceHandle)) {
edge.status = 'skipped';
} else {
edge.status = 'active';
}
});
const nextStepNodes = runtimeNodes.filter((node) => {
return targetEdges.some((item) => item.target === node.nodeId);
});
if (props.mode === 'debug') {
debugNextStepRunNodes = debugNextStepRunNodes.concat(nextStepNodes);
return [];
}
return nextStepNodes;
}
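// Editor's note: a hedged illustration of the skip semantics above, with
// hypothetical data. Handle ids are assumed to follow the
// getHandleId(nodeId, 'source', key) pattern used by the if-else node later in
// this diff (rendered here as `${nodeId}-source-${key}`).
//
// Suppose node 'n1' returns skipHandleId = ['n1-source-ELSE'] and the edges are:
//   { source: 'n1', sourceHandle: 'n1-source-IF',   target: 'n2' }
//   { source: 'n1', sourceHandle: 'n1-source-ELSE', target: 'n3' }
// nodeOutput marks the IF edge 'active' and the ELSE edge 'skipped', then returns
// [n2, n3]; checkNodeCanRun dispatches n2 normally (nodeRunWithActive) while n3
// goes through nodeRunWithSkip, which keeps propagating the skip state downstream.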
function checkNodeCanRun(nodes: RuntimeNodeItemType[] = []): Promise<any> {
return Promise.all(
nodes.map((node) => {
const status = checkNodeRunStatus({
node,
runtimeEdges
});
if (status === 'run') {
return nodeRunWithActive(node);
}
if (status === 'skip') {
return nodeRunWithSkip(node);
}
return [];
})
).then((result) => {
const flat = result.flat();
if (flat.length === 0) return;
// update output
const nextNodes = flat.map((item) => nodeOutput(item.node, item.result)).flat();
return checkNodeCanRun(nextNodes);
});
}
// After a node finishes running, reset its incoming edge statuses to avoid polluting later runs
function nodeRunFinish(node: RuntimeNodeItemType) {
const edges = runtimeEdges.filter((item) => item.target === node.nodeId);
edges.forEach((item) => {
item.status = 'waiting';
});
}
/* Resolve a node's input params: template variables, reference values, then type coercion */
function getNodeRunParams(node: RuntimeNodeItemType) {
const params: Record<string, any> = {};
node.inputs.forEach((input) => {
// replace {{}} variables
let value = replaceVariable(input.value, variables);
// replace reference variables
value = getReferenceVariableValue({
value,
nodes: runtimeNodes,
variables
});
// console.log(JSON.stringify(input, null, 2), '=====================');
// format valueType
params[input.key] = valueTypeFormat(value, input.valueType);
});
return params;
}
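// Editor's note: a hedged sketch of the resolution above, assuming
// variables = { appName: 'demo' } and the [nodeId, outputId] reference format
// that the if-else node in this diff reads back out of node outputs:
//   { key: 'text',  value: 'Hello {{appName}}' } -> params.text  = 'Hello demo'
//   { key: 'quote', value: ['nodeA', 'outId'] }  -> params.quote = the current value
//                                                  of output 'outId' on node 'nodeA'
// Each resolved value is then coerced by valueTypeFormat to the declared valueType.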
async function nodeRunWithActive(node: RuntimeNodeItemType) {
if (res?.closed || props.maxRunTimes <= 0) return [];
// push run status messages
if (res && stream && detail && node.showStatus) {
responseStatus({
res,
name: module.name,
name: node.name,
status: 'running'
});
}
// get module running params
const params: Record<string, any> = {};
module.inputs.forEach((item) => {
params[item.key] = valueTypeFormat(item.value, item.valueType);
});
// get node running params
const params = getNodeRunParams(node);
const dispatchData: ModuleDispatchProps<Record<string, any>> = {
...props,
@@ -244,15 +275,16 @@ export async function dispatchWorkFlow({
user,
stream,
detail,
module,
runtimeModules: runningModules,
node,
runtimeNodes,
runtimeEdges,
params
};
// run node
const dispatchRes: Record<string, any> = await (async () => {
if (callbackMap[module.flowType]) {
return callbackMap[module.flowType](dispatchData);
if (callbackMap[node.flowNodeType]) {
return callbackMap[node.flowNodeType](dispatchData);
}
return {};
})();
@@ -261,139 +293,74 @@ export async function dispatchWorkFlow({
const formatResponseData: ChatHistoryItemResType = (() => {
if (!dispatchRes[DispatchNodeResponseKeyEnum.nodeResponse]) return undefined;
return {
moduleName: module.name,
moduleType: module.flowType,
nodeId: node.nodeId,
moduleName: node.name,
moduleType: node.flowNodeType,
...dispatchRes[DispatchNodeResponseKeyEnum.nodeResponse]
};
})();
// Add output default value
module.outputs.forEach((item) => {
node.outputs.forEach((item) => {
if (!item.required) return;
if (dispatchRes[item.key] !== undefined) return;
dispatchRes[item.key] = valueTypeFormat(item.defaultValue, item.valueType);
});
// Pass userChatInput
const hasUserChatInputTarget = !!module.outputs.find(
(item) => item.key === ModuleOutputKeyEnum.userChatInput
)?.targets?.length;
nodeRunFinish(node);
return moduleOutput(module, {
[ModuleOutputKeyEnum.finish]: true,
[ModuleOutputKeyEnum.userChatInput]: hasUserChatInputTarget
? params[ModuleOutputKeyEnum.userChatInput]
: undefined,
...dispatchRes,
[DispatchNodeResponseKeyEnum.nodeResponse]: formatResponseData,
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]:
dispatchRes[DispatchNodeResponseKeyEnum.nodeDispatchUsages]
});
return {
node,
result: {
...dispatchRes,
[DispatchNodeResponseKeyEnum.nodeResponse]: formatResponseData
}
};
}
async function nodeRunWithSkip(node: RuntimeNodeItemType) {
// Propagate skip: mark every source handle of this node so all downstream targets run in skip mode
const targetEdges = runtimeEdges.filter((item) => item.source === node.nodeId);
nodeRunFinish(node);
return {
node,
result: {
[DispatchNodeResponseKeyEnum.skipHandleId]: targetEdges.map((item) => item.sourceHandle)
}
};
}
// start process with initInput
const initModules = runningModules.filter((item) => item.isEntry);
const entryNodes = runtimeNodes.filter((item) => item.isEntry);
// reset entry
modules.forEach((item) => {
runtimeNodes.forEach((item) => {
item.isEntry = false;
});
initModules.map((module) =>
moduleInput(module, {
...startParams,
history: [] // abandon history field. History module will get histories from other fields.
})
);
await checkModulesCanRun(initModules);
await checkNodeCanRun(entryNodes);
// always try to run the pluginOutput node at the end
const pluginOutputModule = runningModules.find(
(item) => item.flowType === FlowNodeTypeEnum.pluginOutput
const pluginOutputModule = runtimeNodes.find(
(item) => item.flowNodeType === FlowNodeTypeEnum.pluginOutput
);
if (pluginOutputModule) {
await moduleRun(pluginOutputModule);
if (pluginOutputModule && props.mode !== 'debug') {
await nodeRunWithActive(pluginOutputModule);
}
return {
flowResponses: chatResponses,
flowUsages: chatNodeUsages,
debugResponse: {
finishedNodes: runtimeNodes,
finishedEdges: runtimeEdges,
nextStepRunNodes: debugNextStepRunNodes
},
[DispatchNodeResponseKeyEnum.assistantResponses]:
concatAssistantResponseAnswerText(chatAssistantResponse),
mergeAssistantResponseAnswerText(chatAssistantResponse),
[DispatchNodeResponseKeyEnum.toolResponses]: toolRunResponse
};
}
/* init store modules to running modules */
function loadModules(
modules: ModuleItemType[],
variables: Record<string, any>
): RunningModuleItemType[] {
return modules
.filter((item) => {
return ![FlowNodeTypeEnum.userGuide].includes(item.moduleId as any);
})
.map<RunningModuleItemType>((module) => {
return {
moduleId: module.moduleId,
name: module.name,
avatar: module.avatar,
intro: module.intro,
flowType: module.flowType,
showStatus: module.showStatus,
isEntry: module.isEntry,
inputs: module.inputs
.filter(
/*
1. system inputs must be kept
2. inputs connected by a source handle
3. inputs with a manually set value or a default value
4. for modules connected by a tool, keep the toolDescription input
*/
(item) => {
const isTool = checkTheModuleConnectedByTool(modules, module);
if (isTool && item.toolDescription) {
return true;
}
return (
item.type === FlowNodeInputTypeEnum.systemInput ||
item.connected ||
item.value !== undefined
);
}
) // filter unconnected target input
.map((item) => {
const replace = ['string'].includes(typeof item.value);
return {
key: item.key,
// variables replace
value: replace ? replaceVariable(item.value, variables) : item.value,
valueType: item.valueType,
required: item.required,
toolDescription: item.toolDescription
};
}),
outputs: module.outputs
.map((item) => ({
key: item.key,
required: item.required,
defaultValue: item.defaultValue,
answer: item.key === ModuleOutputKeyEnum.answerText,
value: undefined,
valueType: item.valueType,
targets: item.targets
}))
.sort((a, b) => {
// finish output always at last
if (a.key === ModuleOutputKeyEnum.finish) return 1;
if (b.key === ModuleOutputKeyEnum.finish) return -1;
return 0;
})
};
});
}
/* sse response module status */
export function responseStatus({
res,
@@ -418,7 +385,8 @@ export function getSystemVariable({ timezone }: { timezone: string }) {
};
}
export const concatAssistantResponseAnswerText = (response: AIChatItemValueItemType[]) => {
/* Merge consecutive text messages into one */
export const mergeAssistantResponseAnswerText = (response: AIChatItemValueItemType[]) => {
const result: AIChatItemValueItemType[] = [];
// merge consecutive text items
for (let i = 0; i < response.length; i++) {

View File

@@ -0,0 +1,10 @@
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type/index.d';
export type UserChatInputProps = ModuleDispatchProps<{
[NodeInputKeyEnum.userChatInput]: string;
}>;
export const dispatchSystemConfig = (props: Record<string, any>) => {
const { variables } = props as UserChatInputProps;
return variables;
};

View File

@@ -0,0 +1,15 @@
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type/index.d';
export type UserChatInputProps = ModuleDispatchProps<{
[NodeInputKeyEnum.userChatInput]: string;
}>;
export const dispatchWorkflowStart = (props: Record<string, any>) => {
const {
variables: { userChatInput },
params: { userChatInput: query }
} = props as UserChatInputProps;
return {
userChatInput: query || userChatInput
};
};

View File

@@ -1,25 +1,31 @@
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type/index.d';
import { dispatchWorkFlow } from '../index';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/module/node/constant';
import { DYNAMIC_INPUT_KEY, ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { NodeInputKeyEnum, WorkflowIOValueTypeEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { getPluginRuntimeById } from '../../../plugin/controller';
import { authPluginCanUse } from '../../../../support/permission/auth/plugin';
import { setEntryEntries } from '../utils';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
import {
getDefaultEntryNodeIds,
initWorkflowEdgeStatus,
storeNodes2RuntimeNodes
} from '@fastgpt/global/core/workflow/runtime/utils';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { updateToolInputValue } from '../agent/runTool/utils';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
type RunPluginProps = ModuleDispatchProps<{
[ModuleInputKeyEnum.pluginId]: string;
[key: string]: any;
}>;
type RunPluginResponse = DispatchNodeResultType<{}>;
export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPluginResponse> => {
const {
node: { pluginId },
mode,
teamId,
tmbId,
params: { pluginId, ...data }
params: data
} = props;
if (!pluginId) {
@@ -30,37 +36,67 @@ export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPlugi
const plugin = await getPluginRuntimeById(pluginId);
// concat dynamic inputs
const inputModule = plugin.modules.find((item) => item.flowType === FlowNodeTypeEnum.pluginInput);
const inputModule = plugin.nodes.find(
(item) => item.flowNodeType === FlowNodeTypeEnum.pluginInput
);
if (!inputModule) return Promise.reject('Plugin error, It has no set input.');
const hasDynamicInput = inputModule.inputs.find((input) => input.key === DYNAMIC_INPUT_KEY);
const hasDynamicInput = inputModule.inputs.find(
(input) => input.key === NodeInputKeyEnum.addInputParam
);
const startParams: Record<string, any> = (() => {
if (!hasDynamicInput) return data;
const params: Record<string, any> = {
[DYNAMIC_INPUT_KEY]: {}
[NodeInputKeyEnum.addInputParam]: {}
};
for (const key in data) {
if (key === NodeInputKeyEnum.addInputParam) continue;
const input = inputModule.inputs.find((input) => input.key === key);
if (input) {
params[key] = data[key];
} else {
params[DYNAMIC_INPUT_KEY][key] = data[key];
params[NodeInputKeyEnum.addInputParam][key] = data[key];
}
}
return params;
})();
// replace input by dynamic variables
if (hasDynamicInput) {
for (const key in startParams) {
if (key === NodeInputKeyEnum.addInputParam) continue;
startParams[key] = replaceVariable(
startParams[key],
startParams[NodeInputKeyEnum.addInputParam]
);
}
}
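// Editor's note: a hedged illustration of the two steps above, with a
// hypothetical plugin that declares an input 'query' plus the dynamic slot
// (NodeInputKeyEnum.addInputParam):
//   data        = { query: 'hi {{foo}}', foo: 'there' }
//   startParams = { query: 'hi {{foo}}', [addInputParam]: { foo: 'there' } }
// The replacement pass then rewrites declared inputs with the dynamic values,
// so startParams.query becomes 'hi there' (assuming replaceVariable substitutes
// {{key}} placeholders).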
const { flowResponses, flowUsages, assistantResponses } = await dispatchWorkFlow({
...props,
modules: setEntryEntries(plugin.modules).map((module) => ({
...module,
showStatus: false
})),
runtimeModules: undefined, // must reset
startParams
runtimeNodes: storeNodes2RuntimeNodes(plugin.nodes, getDefaultEntryNodeIds(plugin.nodes)).map(
(node) => {
if (node.flowNodeType === FlowNodeTypeEnum.pluginInput) {
return {
...node,
showStatus: false,
inputs: updateToolInputValue({
inputs: node.inputs,
params: startParams
})
};
}
return {
...node,
showStatus: false
};
}
),
runtimeEdges: initWorkflowEdgeStatus(plugin.edges)
});
const output = flowResponses.find((item) => item.moduleType === FlowNodeTypeEnum.pluginOutput);

View File

@@ -1,4 +1,4 @@
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type/index.d';
export type PluginInputProps = ModuleDispatchProps<{
[key: string]: any;

View File

@@ -1,6 +1,6 @@
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type.d';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type/index.d';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type.d';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
export type PluginOutputProps = ModuleDispatchProps<{
[key: string]: any;

View File

@@ -1,14 +1,17 @@
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import {
DispatchNodeResponseKeyEnum,
SseResponseEventEnum
} from '@fastgpt/global/core/workflow/runtime/constants';
import { responseWrite } from '../../../../common/response';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type/index.d';
import { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
export type AnswerProps = ModuleDispatchProps<{
text: string;
}>;
export type AnswerResponse = DispatchNodeResultType<{
[ModuleOutputKeyEnum.answerText]: string;
[NodeOutputKeyEnum.answerText]: string;
}>;
export const dispatchAnswer = (props: Record<string, any>): AnswerResponse => {
@@ -16,12 +19,13 @@ export const dispatchAnswer = (props: Record<string, any>): AnswerResponse => {
res,
detail,
stream,
node: { name },
params: { text = '' }
} = props as AnswerProps;
const formatText = typeof text === 'string' ? text : JSON.stringify(text, null, 2);
if (stream) {
if (res && stream) {
responseWrite({
res,
event: detail ? SseResponseEventEnum.fastAnswer : undefined,
@@ -32,6 +36,9 @@ export const dispatchAnswer = (props: Record<string, any>): AnswerResponse => {
}
return {
[ModuleOutputKeyEnum.answerText]: formatText
[NodeOutputKeyEnum.answerText]: formatText,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
textOutput: formatText
}
};
};

View File

@@ -1,16 +1,21 @@
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type/index.d';
import {
DYNAMIC_INPUT_KEY,
ModuleInputKeyEnum,
ModuleOutputKeyEnum
} from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
NodeInputKeyEnum,
NodeOutputKeyEnum,
WorkflowIOValueTypeEnum
} from '@fastgpt/global/core/workflow/constants';
import {
DispatchNodeResponseKeyEnum,
SseResponseEventEnum
} from '@fastgpt/global/core/workflow/runtime/constants';
import axios from 'axios';
import { valueTypeFormat } from '../utils';
import { SERVICE_LOCAL_HOST } from '../../../../common/system/tools';
import { addLog } from '../../../../common/system/log';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { responseWrite } from '../../../../common/response';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
type PropsArrType = {
key: string;
@@ -18,17 +23,17 @@ type PropsArrType = {
value: string;
};
type HttpRequestProps = ModuleDispatchProps<{
[ModuleInputKeyEnum.abandon_httpUrl]: string;
[ModuleInputKeyEnum.httpMethod]: string;
[ModuleInputKeyEnum.httpReqUrl]: string;
[ModuleInputKeyEnum.httpHeaders]: PropsArrType[];
[ModuleInputKeyEnum.httpParams]: PropsArrType[];
[ModuleInputKeyEnum.httpJsonBody]: string;
[DYNAMIC_INPUT_KEY]: Record<string, any>;
[NodeInputKeyEnum.abandon_httpUrl]: string;
[NodeInputKeyEnum.httpMethod]: string;
[NodeInputKeyEnum.httpReqUrl]: string;
[NodeInputKeyEnum.httpHeaders]: PropsArrType[];
[NodeInputKeyEnum.httpParams]: PropsArrType[];
[NodeInputKeyEnum.httpJsonBody]: string;
[NodeInputKeyEnum.addInputParam]: Record<string, any>;
[key: string]: any;
}>;
type HttpResponse = DispatchNodeResultType<{
[ModuleOutputKeyEnum.failed]?: boolean;
[NodeOutputKeyEnum.failed]?: boolean;
[key: string]: any;
}>;
@@ -36,11 +41,13 @@ const UNDEFINED_SIGN = 'UNDEFINED_SIGN';
export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<HttpResponse> => {
let {
res,
detail,
appId,
chatId,
responseChatItemId,
variables,
module: { outputs },
node: { outputs },
histories,
params: {
system_httpMethod: httpMethod = 'POST',
@@ -48,7 +55,7 @@ export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<H
system_httpHeader: httpHeader,
system_httpParams: httpParams = [],
system_httpJsonBody: httpJsonBody,
[DYNAMIC_INPUT_KEY]: dynamicInput,
[NodeInputKeyEnum.addInputParam]: dynamicInput,
...body
}
} = props;
@@ -63,19 +70,25 @@ export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<H
responseChatItemId,
...variables,
histories: histories.slice(-10),
...body
...body,
...dynamicInput
};
httpReqUrl = replaceVariable(httpReqUrl, concatVariables);
const allVariables = {
[NodeInputKeyEnum.addInputParam]: concatVariables,
...concatVariables
};
httpReqUrl = replaceVariable(httpReqUrl, allVariables);
// parse header
const headers = await (() => {
try {
if (!httpHeader || httpHeader.length === 0) return {};
// array
return httpHeader.reduce((acc: Record<string, string>, item) => {
const key = replaceVariable(item.key, concatVariables);
const value = replaceVariable(item.value, concatVariables);
acc[key] = valueTypeFormat(value, 'string');
const key = replaceVariable(item.key, allVariables);
const value = replaceVariable(item.value, allVariables);
acc[key] = valueTypeFormat(value, WorkflowIOValueTypeEnum.string);
return acc;
}, {});
} catch (error) {
@@ -83,18 +96,18 @@ export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<H
}
})();
const params = httpParams.reduce((acc: Record<string, string>, item) => {
const key = replaceVariable(item.key, concatVariables);
const value = replaceVariable(item.value, concatVariables);
acc[key] = valueTypeFormat(value, 'string');
const key = replaceVariable(item.key, allVariables);
const value = replaceVariable(item.value, allVariables);
acc[key] = valueTypeFormat(value, WorkflowIOValueTypeEnum.string);
return acc;
}, {});
const requestBody = await (() => {
if (!httpJsonBody) return { [DYNAMIC_INPUT_KEY]: dynamicInput };
httpJsonBody = replaceVariable(httpJsonBody, concatVariables);
if (!httpJsonBody) return {};
try {
httpJsonBody = replaceVariable(httpJsonBody, allVariables);
const jsonParse = JSON.parse(httpJsonBody);
const removeSignJson = removeUndefinedSign(jsonParse);
return { [DYNAMIC_INPUT_KEY]: dynamicInput, ...removeSignJson };
return removeSignJson;
} catch (error) {
console.log(error);
return Promise.reject(`Invalid JSON body: ${httpJsonBody}`);
@@ -118,6 +131,16 @@ export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<H
results[key] = valueTypeFormat(formatResponse[key], output.valueType);
}
if (typeof formatResponse[NodeOutputKeyEnum.answerText] === 'string') {
responseWrite({
res,
event: detail ? SseResponseEventEnum.fastAnswer : undefined,
data: textAdaptGptResponse({
text: formatResponse[NodeOutputKeyEnum.answerText]
})
});
}
return {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: 0,
@@ -127,13 +150,13 @@ export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<H
httpResult: rawResponse
},
[DispatchNodeResponseKeyEnum.toolResponses]: results,
[ModuleOutputKeyEnum.httpRawResponse]: rawResponse,
[NodeOutputKeyEnum.httpRawResponse]: rawResponse,
...results
};
} catch (error) {
addLog.error('Http request error', error);
return {
[ModuleOutputKeyEnum.failed]: true,
[NodeOutputKeyEnum.failed]: true,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: 0,
params: Object.keys(params).length > 0 ? params : undefined,
@@ -141,7 +164,7 @@ export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<H
headers: Object.keys(headers).length > 0 ? headers : undefined,
httpResult: { error: formatHttpError(error) }
},
[ModuleOutputKeyEnum.httpRawResponse]: getErrText(error)
[NodeOutputKeyEnum.httpRawResponse]: getErrText(error)
};
}
};
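// Editor's note: a hedged example of the templating above, assuming
// replaceVariable substitutes {{key}} placeholders and concatVariables holds the
// merged chat/global/dynamic values, e.g. { appId: 'app1', uid: 'u42' }:
//   httpReqUrl 'https://example.com/api/{{appId}}' -> 'https://example.com/api/app1'
//   header     { key: 'x-user', value: '{{uid}}' } -> headers['x-user'] = 'u42'
//   param      { key: 'q',      value: '{{uid}}' } -> params.q = 'u42'
// Header and param values are coerced to strings via
// valueTypeFormat(value, WorkflowIOValueTypeEnum.string).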

View File

@@ -1,27 +1,27 @@
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type/index.d';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { ModelTypeEnum, getLLMModel } from '../../../../core/ai/model';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import { queryExtension } from '../../../../core/ai/functions/queryExtension';
import { getHistories } from '../utils';
import { hashStr } from '@fastgpt/global/common/string/tools';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
type Props = ModuleDispatchProps<{
[ModuleInputKeyEnum.aiModel]: string;
[ModuleInputKeyEnum.aiSystemPrompt]?: string;
[ModuleInputKeyEnum.history]?: ChatItemType[] | number;
[ModuleInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.aiModel]: string;
[NodeInputKeyEnum.aiSystemPrompt]?: string;
[NodeInputKeyEnum.history]?: ChatItemType[] | number;
[NodeInputKeyEnum.userChatInput]: string;
}>;
type Response = DispatchNodeResultType<{
[ModuleOutputKeyEnum.text]: string;
[NodeOutputKeyEnum.text]: string;
}>;
export const dispatchQueryExtension = async ({
histories,
module,
node,
params: { model, systemPrompt, history, userChatInput }
}: Props): Promise<Response> => {
if (!userChatInput) {
@@ -65,12 +65,12 @@ export const dispatchQueryExtension = async ({
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: module.name,
moduleName: node.name,
totalPoints,
model: modelName,
tokens
}
],
[ModuleOutputKeyEnum.text]: JSON.stringify(filterSameQueries)
[NodeOutputKeyEnum.text]: JSON.stringify(filterSameQueries)
};
};

View File

@@ -1,26 +1,31 @@
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { SelectAppItemType } from '@fastgpt/global/core/module/type';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type/index.d';
import { SelectAppItemType } from '@fastgpt/global/core/workflow/type/index.d';
import { dispatchWorkFlow } from '../index';
import { MongoApp } from '../../../../core/app/schema';
import { responseWrite } from '../../../../common/response';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { getHistories, setEntryEntries } from '../utils';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import {
getDefaultEntryNodeIds,
initWorkflowEdgeStatus,
storeNodes2RuntimeNodes,
textAdaptGptResponse
} from '@fastgpt/global/core/workflow/runtime/utils';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { getHistories } from '../utils';
import { chatValue2RuntimePrompt, runtimePrompt2ChatsValue } from '@fastgpt/global/core/chat/adapt';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
type Props = ModuleDispatchProps<{
[ModuleInputKeyEnum.userChatInput]: string;
[ModuleInputKeyEnum.history]?: ChatItemType[] | number;
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.history]?: ChatItemType[] | number;
app: SelectAppItemType;
}>;
type Response = DispatchNodeResultType<{
[ModuleOutputKeyEnum.answerText]: string;
[ModuleOutputKeyEnum.history]: ChatItemType[];
[NodeOutputKeyEnum.answerText]: string;
[NodeOutputKeyEnum.history]: ChatItemType[];
}>;
export const dispatchAppRequest = async (props: Props): Promise<Response> => {
@@ -48,7 +53,7 @@ export const dispatchAppRequest = async (props: Props): Promise<Response> => {
return Promise.reject('App not found');
}
if (stream) {
if (res && stream) {
responseWrite({
res,
event: detail ? SseResponseEventEnum.answer : undefined,
@@ -63,11 +68,12 @@ export const dispatchAppRequest = async (props: Props): Promise<Response> => {
const { flowResponses, flowUsages, assistantResponses } = await dispatchWorkFlow({
...props,
appId: app.id,
modules: setEntryEntries(appData.modules),
runtimeModules: undefined, // must reset
runtimeNodes: storeNodes2RuntimeNodes(appData.modules, getDefaultEntryNodeIds(appData.modules)),
runtimeEdges: initWorkflowEdgeStatus(appData.edges),
histories: chatHistories,
inputFiles,
startParams: {
variables: {
...props.variables,
userChatInput
}
});

View File

@@ -0,0 +1,70 @@
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { VariableConditionEnum } from '@fastgpt/global/core/workflow/template/system/ifElse/constant';
import {
IfElseConditionType,
IfElseListItemType
} from '@fastgpt/global/core/workflow/template/system/ifElse/type';
import { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type';
import { getHandleId } from '@fastgpt/global/core/workflow/utils';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.condition]: IfElseConditionType;
[NodeInputKeyEnum.ifElseList]: IfElseListItemType[];
}>;
function checkCondition(condition: VariableConditionEnum, variableValue: any, value: string) {
const operations = {
[VariableConditionEnum.isEmpty]: () => !variableValue,
[VariableConditionEnum.isNotEmpty]: () => !!variableValue,
[VariableConditionEnum.equalTo]: () => variableValue === value,
[VariableConditionEnum.notEqual]: () => variableValue !== value,
[VariableConditionEnum.greaterThan]: () => variableValue > Number(value),
[VariableConditionEnum.lessThan]: () => variableValue < Number(value),
[VariableConditionEnum.greaterThanOrEqualTo]: () => variableValue >= Number(value),
[VariableConditionEnum.lessThanOrEqualTo]: () => variableValue <= Number(value),
[VariableConditionEnum.include]: () => variableValue.includes(value),
[VariableConditionEnum.notInclude]: () => !variableValue.includes(value),
[VariableConditionEnum.startWith]: () => variableValue.startsWith(value),
[VariableConditionEnum.endWith]: () => variableValue.endsWith(value),
[VariableConditionEnum.lengthEqualTo]: () => variableValue.length === Number(value),
[VariableConditionEnum.lengthNotEqualTo]: () => variableValue.length !== Number(value),
[VariableConditionEnum.lengthGreaterThan]: () => variableValue.length > Number(value),
[VariableConditionEnum.lengthGreaterThanOrEqualTo]: () => variableValue.length >= Number(value),
[VariableConditionEnum.lengthLessThan]: () => variableValue.length < Number(value),
[VariableConditionEnum.lengthLessThanOrEqualTo]: () => variableValue.length <= Number(value)
};
return (operations[condition] || (() => false))();
}
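// Editor's note: a few illustrative checks (hypothetical values) against the
// operations table above:
//   checkCondition(VariableConditionEnum.include, 'hello world', 'world')   // true
//   checkCondition(VariableConditionEnum.lengthGreaterThan, [1, 2, 3], '2') // true
//   checkCondition(VariableConditionEnum.isEmpty, undefined, '')            // true
// Unknown conditions fall through to the default () => false branch.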
export const dispatchIfElse = async (props: Props): Promise<DispatchNodeResultType<{}>> => {
const {
params,
runtimeNodes,
node: { nodeId }
} = props;
const { condition, ifElseList } = params;
const listResult = ifElseList.map((item) => {
const { variable, condition: variableCondition, value } = item;
const variableValue = runtimeNodes
.find((node) => node.nodeId === variable[0])
?.outputs?.find((item) => item.id === variable[1])?.value;
return checkCondition(variableCondition as VariableConditionEnum, variableValue, value || '');
});
const result = condition === 'AND' ? listResult.every(Boolean) : listResult.some(Boolean);
return {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: 0,
ifElseResult: result ? 'IF' : 'ELSE'
},
[DispatchNodeResponseKeyEnum.skipHandleId]: result
? [getHandleId(nodeId, 'source', 'ELSE')]
: [getHandleId(nodeId, 'source', 'IF')]
};
};

View File

@@ -1,23 +1,19 @@
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import {
DYNAMIC_INPUT_KEY,
ModuleInputKeyEnum,
ModuleOutputKeyEnum
} from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type/index.d';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import axios from 'axios';
import { valueTypeFormat } from '../utils';
import { SERVICE_LOCAL_HOST } from '../../../../common/system/tools';
import { addLog } from '../../../../common/system/log';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
type LafRequestProps = ModuleDispatchProps<{
[ModuleInputKeyEnum.httpReqUrl]: string;
[DYNAMIC_INPUT_KEY]: Record<string, any>;
[NodeInputKeyEnum.httpReqUrl]: string;
[NodeInputKeyEnum.addInputParam]: Record<string, any>;
[key: string]: any;
}>;
type LafResponse = DispatchNodeResultType<{
[ModuleOutputKeyEnum.failed]?: boolean;
[NodeOutputKeyEnum.failed]?: boolean;
[key: string]: any;
}>;
@@ -29,9 +25,13 @@ export const dispatchLafRequest = async (props: LafRequestProps): Promise<LafRes
chatId,
responseChatItemId,
variables,
module: { outputs },
node: { outputs },
histories,
params: { system_httpReqUrl: httpReqUrl, [DYNAMIC_INPUT_KEY]: dynamicInput, ...body }
params: {
system_httpReqUrl: httpReqUrl,
[NodeInputKeyEnum.addInputParam]: dynamicInput,
...body
}
} = props;
if (!httpReqUrl) {
@@ -83,13 +83,13 @@ export const dispatchLafRequest = async (props: LafRequestProps): Promise<LafRes
httpResult: rawResponse
},
[DispatchNodeResponseKeyEnum.toolResponses]: rawResponse,
[ModuleOutputKeyEnum.httpRawResponse]: rawResponse,
[NodeOutputKeyEnum.httpRawResponse]: rawResponse,
...results
};
} catch (error) {
addLog.error('Http request error', error);
return {
[ModuleOutputKeyEnum.failed]: true,
[NodeOutputKeyEnum.failed]: true,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: 0,
body: Object.keys(requestBody).length > 0 ? requestBody : undefined,

View File

@@ -4,12 +4,19 @@ import {
ChatItemValueItemType,
ToolRunResponseItemType
} from '@fastgpt/global/core/chat/type';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
import { RuntimeEdgeItemType } from '@fastgpt/global/core/workflow/type/edge';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
export type DispatchFlowResponse = {
flowResponses: ChatHistoryItemResType[];
flowUsages: ChatNodeUsageType[];
debugResponse: {
finishedNodes: RuntimeNodeItemType[];
finishedEdges: RuntimeEdgeItemType[];
nextStepRunNodes: RuntimeNodeItemType[];
};
[DispatchNodeResponseKeyEnum.toolResponses]: ToolRunResponseItemType;
[DispatchNodeResponseKeyEnum.assistantResponses]: AIChatItemValueItemType[];
};

View File

@@ -1,43 +1,49 @@
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import { ModuleIOValueTypeEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/module/node/constant';
import { ModuleItemType } from '@fastgpt/global/core/module/type.d';
import {
WorkflowIOValueTypeEnum,
NodeOutputKeyEnum
} from '@fastgpt/global/core/workflow/constants';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import {
RuntimeEdgeItemType,
RuntimeNodeItemType
} from '@fastgpt/global/core/workflow/runtime/type';
import { StoreNodeItemType } from '@fastgpt/global/core/workflow/type/index.d';
export const setEntryEntries = (modules: ModuleItemType[]) => {
const initRunningModuleType: Record<string, boolean> = {
[FlowNodeTypeEnum.historyNode]: true,
[FlowNodeTypeEnum.questionInput]: true,
[FlowNodeTypeEnum.pluginInput]: true
};
modules.forEach((item) => {
if (initRunningModuleType[item.flowType]) {
item.isEntry = true;
}
});
return modules;
export const filterToolNodeIdByEdges = ({
nodeId,
edges
}: {
nodeId: string;
edges: RuntimeEdgeItemType[];
}) => {
return edges
.filter(
(edge) => edge.source === nodeId && edge.targetHandle === NodeOutputKeyEnum.selectedTools
)
.map((edge) => edge.target);
};
export const checkTheModuleConnectedByTool = (
modules: ModuleItemType[],
module: ModuleItemType
) => {
let sign = false;
const toolModules = modules.filter((item) => item.flowType === FlowNodeTypeEnum.tools);
// export const checkTheModuleConnectedByTool = (
// modules: StoreNodeItemType[],
// node: StoreNodeItemType
// ) => {
// let sign = false;
// const toolModules = modules.filter((item) => item.flowNodeType === FlowNodeTypeEnum.tools);
toolModules.forEach((item) => {
const toolOutput = item.outputs.find(
(output) => output.key === ModuleOutputKeyEnum.selectedTools
);
toolOutput?.targets.forEach((target) => {
if (target.moduleId === module.moduleId) {
sign = true;
}
});
});
// toolModules.forEach((item) => {
// const toolOutput = item.outputs.find(
// (output) => output.key === NodeOutputKeyEnum.selectedTools
// );
// toolOutput?.targets.forEach((target) => {
// if (target.moduleId === node.moduleId) {
// sign = true;
// }
// });
// });
return sign;
};
// return sign;
// };
export const getHistories = (history?: ChatItemType[] | number, histories: ChatItemType[] = []) => {
if (!history) return [];
@@ -48,7 +54,7 @@ export const getHistories = (history?: ChatItemType[] | number, histories: ChatI
};
/* value type format */
export const valueTypeFormat = (value: any, type?: `${ModuleIOValueTypeEnum}`) => {
export const valueTypeFormat = (value: any, type?: WorkflowIOValueTypeEnum) => {
if (value === undefined) return;
if (type === 'string') {
@@ -57,6 +63,16 @@ export const valueTypeFormat = (value: any, type?: `${ModuleIOValueTypeEnum}`) =
}
if (type === 'number') return Number(value);
if (type === 'boolean') return Boolean(value);
try {
if (type === WorkflowIOValueTypeEnum.datasetQuote && !Array.isArray(value)) {
return JSON.parse(value);
}
if (type === WorkflowIOValueTypeEnum.selectDataset && !Array.isArray(value)) {
return JSON.parse(value);
}
} catch (error) {
return value;
}
return value;
};
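// Editor's note: hypothetical coercions performed by valueTypeFormat as defined
// above (enum members assumed to equal their lowercase names):
//   valueTypeFormat('123', WorkflowIOValueTypeEnum.number)              // 123
//   valueTypeFormat(0, WorkflowIOValueTypeEnum.boolean)                 // false
//   valueTypeFormat('[{"q":1}]', WorkflowIOValueTypeEnum.datasetQuote)  // parsed array
//   valueTypeFormat('oops', WorkflowIOValueTypeEnum.datasetQuote)       // 'oops'
//                                     (JSON parse fails, original value returned)
//   valueTypeFormat(undefined, anyType)                                 // undefined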

View File

@@ -0,0 +1,321 @@
// @ts-nocheck
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { filterGPTMessageByMaxTokens } from '../../../chat/utils';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { getAIApi } from '../../../ai/config';
import type { ClassifyQuestionAgentItemType } from '@fastgpt/global/core/workflow/type';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { Prompt_CQJson } from '@fastgpt/global/core/ai/prompt/agent';
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { ModelTypeEnum, getLLMModel } from '../../../ai/model';
import { getHistories } from '../utils';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import {
ChatCompletionCreateParams,
ChatCompletionMessageParam,
ChatCompletionTool
} from '@fastgpt/global/core/ai/type';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import {
countMessagesTokens,
countGptMessagesTokens
} from '../../../../common/string/tiktoken/index';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.aiModel]: string;
[NodeInputKeyEnum.aiSystemPrompt]?: string;
[NodeInputKeyEnum.history]?: ChatItemType[] | number;
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.agents]: ClassifyQuestionAgentItemType[];
}>;
type CQResponse = DispatchNodeResultType<{
[key: string]: any;
}>;
type ActionProps = Props & { cqModel: LLMModelItemType };
const agentFunName = 'classify_question';
/* request openai chat */
export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse> => {
const {
user,
module: { name },
histories,
params: { model, history = 6, agents, userChatInput }
} = props as Props;
if (!userChatInput) {
return Promise.reject('Input is empty');
}
const cqModel = getLLMModel(model);
const chatHistories = getHistories(history, histories);
const { arg, tokens } = await (async () => {
if (cqModel.toolChoice) {
return toolChoice({
...props,
histories: chatHistories,
cqModel
});
}
if (cqModel.functionCall) {
return functionCall({
...props,
histories: chatHistories,
cqModel
});
}
return completions({
...props,
histories: chatHistories,
cqModel
});
})();
const result = agents.find((item) => item.key === arg?.type) || agents[agents.length - 1];
const { totalPoints, modelName } = formatModelChars2Points({
model: cqModel.model,
tokens,
modelType: ModelTypeEnum.llm
});
return {
[result.key]: true,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
query: userChatInput,
tokens,
cqList: agents,
cqResult: result.value,
contextTotalLen: chatHistories.length + 2
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: name,
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
tokens
}
]
};
};
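// Editor's note: a hedged sketch of the branch selection above, with hypothetical
// agents:
//   agents = [
//     { key: 'type1', value: 'greeting' },
//     { key: 'type2', value: 'technical question' }
//   ]
// If the model returns arg = { type: 'type2' }, the node resolves with
// { type2: true, ... }, activating that branch; an unmatched answer falls back to
// the last agent in the list.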
const getFunctionCallSchema = async ({
cqModel,
histories,
params: { agents, systemPrompt, userChatInput }
}: ActionProps) => {
const messages: ChatItemType[] = [
...histories,
{
obj: ChatRoleEnum.Human,
value: [
{
type: ChatItemValueTypeEnum.text,
text: {
content: systemPrompt
? `<背景知识>
${systemPrompt}
</背景知识>
问题: "${userChatInput}"
`
: userChatInput
}
}
]
}
];
const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
const filterMessages = await filterGPTMessageByMaxTokens({
messages: adaptMessages,
maxTokens: cqModel.maxContext
});
// function body
const agentFunction = {
name: agentFunName,
description: '结合对话记录及背景知识,对问题进行分类,并返回对应的类型字段',
parameters: {
type: 'object',
properties: {
type: {
type: 'string',
description: `问题类型。下面是几种可选的问题类型: ${agents
.map((item) => `${item.value},返回:'${item.key}'`)
.join('')}`,
enum: agents.map((item) => item.key)
}
},
required: ['type']
}
};
return {
agentFunction,
filterMessages
};
};
const toolChoice = async (props: ActionProps) => {
const { user, cqModel } = props;
const { agentFunction, filterMessages } = await getFunctionCallSchema(props);
// function body
const tools: ChatCompletionTool[] = [
{
type: 'function',
function: agentFunction
}
];
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const response = await ai.chat.completions.create({
model: cqModel.model,
temperature: 0,
messages: filterMessages,
tools,
tool_choice: { type: 'function', function: { name: agentFunName } }
});
try {
const arg = JSON.parse(
response?.choices?.[0]?.message?.tool_calls?.[0]?.function?.arguments || ''
);
const completeMessages: ChatCompletionMessageParam[] = [
...filterMessages,
{
role: ChatCompletionRequestMessageRoleEnum.Assistant,
tool_calls: response.choices?.[0]?.message?.tool_calls
}
];
return {
arg,
tokens: await countGptMessagesTokens(completeMessages, tools)
};
} catch (error) {
console.log(response.choices?.[0]?.message);
console.log('Your model may not support tool_call', error);
return {
arg: {},
tokens: 0
};
}
};
const functionCall = async (props: ActionProps) => {
const { user, cqModel } = props;
const { agentFunction, filterMessages } = await getFunctionCallSchema(props);
const functions: ChatCompletionCreateParams.Function[] = [agentFunction];
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const response = await ai.chat.completions.create({
model: cqModel.model,
temperature: 0,
messages: filterMessages,
function_call: {
name: agentFunName
},
functions
});
try {
const arg = JSON.parse(response?.choices?.[0]?.message?.function_call?.arguments || '');
const completeMessages: ChatCompletionMessageParam[] = [
...filterMessages,
{
role: ChatCompletionRequestMessageRoleEnum.Assistant,
function_call: response.choices?.[0]?.message?.function_call
}
];
return {
arg,
tokens: await countGptMessagesTokens(completeMessages, undefined, functions)
};
} catch (error) {
console.log(response.choices?.[0]?.message);
console.log('Your model may not support tool_call', error);
return {
arg: {},
tokens: 0
};
}
};
const completions = async ({
cqModel,
user,
histories,
params: { agents, systemPrompt = '', userChatInput }
}: ActionProps) => {
const messages: ChatItemType[] = [
{
obj: ChatRoleEnum.Human,
value: [
{
type: ChatItemValueTypeEnum.text,
text: {
content: replaceVariable(cqModel.customCQPrompt || Prompt_CQJson, {
systemPrompt: systemPrompt || 'null',
typeList: agents
.map((item) => `{"questionType": "${item.value}", "typeId": "${item.key}"}`)
.join('\n'),
history: histories
.map((item) => `${item.obj}:${chatValue2RuntimePrompt(item.value).text}`)
.join('\n'),
question: userChatInput
})
}
}
]
}
];
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const data = await ai.chat.completions.create({
model: cqModel.model,
temperature: 0.01,
messages: chats2GPTMessages({ messages, reserveId: false }),
stream: false
});
const answer = data.choices?.[0].message?.content || '';
const id =
agents.find((item) => answer.includes(item.key) || answer.includes(item.value))?.key || '';
return {
tokens: await countMessagesTokens(messages),
arg: { type: id }
};
};

View File

@@ -0,0 +1,384 @@
// @ts-nocheck
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { filterGPTMessageByMaxTokens } from '../../../chat/utils';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import {
countMessagesTokens,
countGptMessagesTokens
} from '../../../../common/string/tiktoken/index';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { getAIApi } from '../../../ai/config';
import type { ContextExtractAgentItemType } from '@fastgpt/global/core/workflow/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type';
import { Prompt_ExtractJson } from '@fastgpt/global/core/ai/prompt/agent';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getHistories } from '../utils';
import { ModelTypeEnum, getLLMModel } from '../../../ai/model';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import json5 from 'json5';
import {
ChatCompletionCreateParams,
ChatCompletionMessageParam,
ChatCompletionTool
} from '@fastgpt/global/core/ai/type';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.history]?: ChatItemType[];
[NodeInputKeyEnum.contextExtractInput]: string;
[NodeInputKeyEnum.extractKeys]: ContextExtractAgentItemType[];
[NodeInputKeyEnum.description]: string;
[NodeInputKeyEnum.aiModel]: string;
}>;
type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.success]?: boolean;
[NodeOutputKeyEnum.failed]?: boolean;
[NodeOutputKeyEnum.contextExtractFields]: string;
}>;
type ActionProps = Props & { extractModel: LLMModelItemType };
const agentFunName = 'request_function';
export async function dispatchContentExtract(props: Props): Promise<Response> {
const {
user,
module: { name },
histories,
params: { content, history = 6, model, description, extractKeys }
} = props;
if (!content) {
return Promise.reject('Input is empty');
}
const extractModel = getLLMModel(model);
const chatHistories = getHistories(history, histories);
const { arg, tokens } = await (async () => {
if (extractModel.toolChoice) {
return toolChoice({
...props,
histories: chatHistories,
extractModel
});
}
if (extractModel.functionCall) {
return functionCall({
...props,
histories: chatHistories,
extractModel
});
}
return completions({
...props,
histories: chatHistories,
extractModel
});
})();
// remove invalid key
for (let key in arg) {
const item = extractKeys.find((item) => item.key === key);
if (!item) {
delete arg[key];
}
if (arg[key] === '') {
delete arg[key];
}
}
// auto fill required fields
extractKeys.forEach((item) => {
if (item.required && !arg[item.key]) {
arg[item.key] = item.defaultValue || '';
}
});
// verify that every extract key made it into the result
let success = !extractKeys.find((item) => !(item.key in arg));
// verify that no unknown keys remain
if (success) {
for (const key in arg) {
const item = extractKeys.find((item) => item.key === key);
if (!item) {
success = false;
break;
}
}
}
const { totalPoints, modelName } = formatModelChars2Points({
model: extractModel.model,
tokens,
modelType: ModelTypeEnum.llm
});
return {
[NodeOutputKeyEnum.success]: success ? true : undefined,
[NodeOutputKeyEnum.failed]: success ? undefined : true,
[NodeOutputKeyEnum.contextExtractFields]: JSON.stringify(arg),
...arg,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
query: content,
tokens,
extractDescription: description,
extractResult: arg,
contextTotalLen: chatHistories.length + 2
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: name,
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
tokens
}
]
};
}
const getFunctionCallSchema = async ({
extractModel,
histories,
params: { content, extractKeys, description }
}: ActionProps) => {
const messages: ChatItemType[] = [
...histories,
{
obj: ChatRoleEnum.Human,
value: [
{
type: ChatItemValueTypeEnum.text,
text: {
content: `我正在执行一个函数,需要你提供一些参数,请以 JSON 字符串格式返回这些参数,要求:
"""
${description ? `- ${description}` : ''}
- 不是每个参数都是必须生成的,如果没有合适的参数值,不要生成该参数,或返回空字符串。
- 需要结合前面的对话内容,一起生成合适的参数。
"""
本次输入内容: ${content}
`
}
}
]
}
];
const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
const filterMessages = await filterGPTMessageByMaxTokens({
messages: adaptMessages,
maxTokens: extractModel.maxContext
});
const properties: Record<
string,
{
type: string;
description: string;
}
> = {};
extractKeys.forEach((item) => {
properties[item.key] = {
type: 'string',
description: item.desc,
...(item.enum ? { enum: item.enum.split('\n') } : {})
};
});
// function schema
const agentFunction = {
name: agentFunName,
description: '需要执行的函数',
parameters: {
type: 'object',
properties
}
};
return {
filterMessages,
agentFunction
};
};
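// Illustrative sketch (not part of the module): two hypothetical extract keys
// [{ key: 'city', desc: '城市名' }, { key: 'date', desc: '日期', enum: '今天\n明天' }]
// produce agentFunction.parameters of
// { type: 'object', properties: {
//   city: { type: 'string', description: '城市名' },
//   date: { type: 'string', description: '日期', enum: ['今天', '明天'] }
// } }
// (unlike the classify-question schema, no `required` list is emitted here).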
const toolChoice = async (props: ActionProps) => {
const { user, extractModel } = props;
const { filterMessages, agentFunction } = await getFunctionCallSchema(props);
const tools: ChatCompletionTool[] = [
{
type: 'function',
function: agentFunction
}
];
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const response = await ai.chat.completions.create({
model: extractModel.model,
temperature: 0,
messages: filterMessages,
tools,
tool_choice: { type: 'function', function: { name: agentFunName } }
});
const arg: Record<string, any> = (() => {
try {
return json5.parse(
response?.choices?.[0]?.message?.tool_calls?.[0]?.function?.arguments || '{}'
);
} catch (error) {
console.log(agentFunction.parameters);
console.log(response.choices?.[0]?.message?.tool_calls?.[0]?.function);
console.log('Your model may not support tool_call', error);
return {};
}
})();
const completeMessages: ChatCompletionMessageParam[] = [
...filterMessages,
{
role: ChatCompletionRequestMessageRoleEnum.Assistant,
tool_calls: response.choices?.[0]?.message?.tool_calls
}
];
return {
tokens: await countGptMessagesTokens(completeMessages, tools),
arg
};
};
const functionCall = async (props: ActionProps) => {
const { user, extractModel } = props;
const { agentFunction, filterMessages } = await getFunctionCallSchema(props);
const functions: ChatCompletionCreateParams.Function[] = [agentFunction];
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const response = await ai.chat.completions.create({
model: extractModel.model,
temperature: 0,
messages: filterMessages,
function_call: {
name: agentFunName
},
functions
});
try {
const arg = JSON.parse(response?.choices?.[0]?.message?.function_call?.arguments || '');
const completeMessages: ChatCompletionMessageParam[] = [
...filterMessages,
{
role: ChatCompletionRequestMessageRoleEnum.Assistant,
function_call: response.choices?.[0]?.message?.function_call
}
];
return {
arg,
tokens: await countGptMessagesTokens(completeMessages, undefined, functions)
};
} catch (error) {
console.log(response.choices?.[0]?.message);
console.log('Your model may not support tool_call', error);
return {
arg: {},
tokens: 0
};
}
};
const completions = async ({
extractModel,
user,
histories,
params: { content, extractKeys, description }
}: ActionProps) => {
const messages: ChatItemType[] = [
{
obj: ChatRoleEnum.Human,
value: [
{
type: ChatItemValueTypeEnum.text,
text: {
content: replaceVariable(extractModel.customExtractPrompt || Prompt_ExtractJson, {
description,
json: extractKeys
.map(
(item) =>
`{"key":"${item.key}", "description":"${item.desc}"${
item.enum ? `, "enum":"[${item.enum.split('\n')}]"` : ''
}}`
)
.join('\n'),
text: `${histories.map((item) => `${item.obj}:${chatValue2RuntimePrompt(item.value).text}`).join('\n')}
Human: ${content}`
})
}
}
]
}
];
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const data = await ai.chat.completions.create({
model: extractModel.model,
temperature: 0.01,
messages: chats2GPTMessages({ messages, reserveId: false }),
stream: false
});
const answer = data.choices?.[0].message?.content || '';
// parse response
const start = answer.indexOf('{');
const end = answer.lastIndexOf('}');
if (start === -1 || end === -1) {
return {
rawResponse: answer,
tokens: await countMessagesTokens(messages),
arg: {}
};
}
const jsonStr = answer
.substring(start, end + 1)
.replace(/(\\n|\\)/g, '')
.replace(/ /g, '');
try {
return {
rawResponse: answer,
tokens: await countMessagesTokens(messages),
arg: json5.parse(jsonStr) as Record<string, any>
};
} catch (error) {
console.log(error);
return {
rawResponse: answer,
tokens: await countMessagesTokens(messages),
arg: {}
};
}
};
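// Illustrative sketch: given a raw answer of
//   '好的,提取结果如下:{"city":"杭州","date":""} 希望有帮助'
// start/lastIndexOf isolate '{"city":"杭州","date":""}', the replace() calls strip
// escapes and spaces, and json5.parse returns { city: '杭州', date: '' }.
// Note that the space stripping also removes spaces inside extracted values.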

View File

@@ -0,0 +1,39 @@
export const Prompt_Tool_Call = `<Instruction>
你是一个智能机器人,除了可以回答用户问题外,你还掌握工具的使用能力。有时候,你可以依赖工具的运行结果,来更准确的回答用户。
工具使用了 JSON Schema 的格式声明,其中 toolId 是工具的 ID,description 是工具的描述,parameters 是工具的参数,包括参数的类型和描述,required 是必填参数的列表。
请你根据工具描述决定回答问题或是使用工具。在完成任务过程中USER代表用户的输入TOOL_RESPONSE代表工具运行结果。ASSISTANT 代表你的输出。
你的每次输出都必须以0,1开头代表是否需要调用工具
0: 不使用工具,直接回答内容。
1: 使用工具,返回工具调用的参数。
例如:
USER: 你好呀
ANSWER: 0: 你好,有什么可以帮助你的么?
USER: 今天杭州的天气如何
ANSWER: 1: {"toolId":"testToolId",arguments:{"city": "杭州"}}
TOOL_RESPONSE: """
晴天......
"""
ANSWER: 0: 今天杭州是晴天。
USER: 今天杭州的天气适合去哪里玩?
ANSWER: 1: {"toolId":"testToolId2",arguments:{"query": "杭州 天气 去哪里玩"}}
TOOL_RESPONSE: """
晴天. 西湖、灵隐寺、千岛湖……
"""
ANSWER: 0: 今天杭州是晴天,适合去西湖、灵隐寺、千岛湖等地玩。
</Instruction>
现在,我们开始吧!下面是你本次可以使用的工具:
"""
{{toolsPrompt}}
"""
下面是正式的对话内容:
USER: {{question}}
ANSWER:
`;
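// Illustrative sketch of the two answer shapes this prompt elicits:
//   'ANSWER: 0: 今天杭州是晴天。' → a plain reply, streamed to the client as-is;
//   'ANSWER: 1: {"toolId":"abc","arguments":{"city":"杭州"}}' → parsed into a tool call
// by parseAnswer in the prompt-call runner later in this commit.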

View File

@@ -0,0 +1,410 @@
// @ts-nocheck
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getAIApi } from '../../../../ai/config';
import { filterGPTMessageByMaxTokens } from '../../../../chat/utils';
import {
ChatCompletion,
StreamChatType,
ChatCompletionMessageParam,
ChatCompletionCreateParams,
ChatCompletionMessageFunctionCall,
ChatCompletionFunctionMessageParam,
ChatCompletionAssistantMessageParam
} from '@fastgpt/global/core/ai/type';
import { NextApiResponse } from 'next';
import {
responseWrite,
responseWriteController,
responseWriteNodeStatus
} from '../../../../../common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { dispatchWorkFlowV1 } from '../../index';
import { DispatchToolModuleProps, RunToolResponse, ToolModuleItemType } from './type.d';
import json5 from 'json5';
import { DispatchFlowResponse } from '../../type';
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken';
import { getNanoid } from '@fastgpt/global/common/string/tools';
import { AIChatItemType, AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
type FunctionRunResponseType = {
moduleRunResponse: DispatchFlowResponse;
functionCallMsg: ChatCompletionFunctionMessageParam;
}[];
export const runToolWithFunctionCall = async (
props: DispatchToolModuleProps & {
messages: ChatCompletionMessageParam[];
toolModules: ToolModuleItemType[];
toolModel: LLMModelItemType;
},
response?: RunToolResponse
): Promise<RunToolResponse> => {
const {
toolModel,
toolModules,
messages,
res,
runtimeModules,
detail = false,
module,
stream
} = props;
const assistantResponses = response?.assistantResponses || [];
const functions: ChatCompletionCreateParams.Function[] = toolModules.map((module) => {
const properties: Record<
string,
{
type: string;
description: string;
required?: boolean;
}
> = {};
module.toolParams.forEach((item) => {
properties[item.key] = {
type: 'string',
description: item.toolDescription || ''
};
});
return {
name: module.moduleId,
description: module.intro,
parameters: {
type: 'object',
properties,
required: module.toolParams.filter((item) => item.required).map((item) => item.key)
}
};
});
const filterMessages = await filterGPTMessageByMaxTokens({
messages,
maxTokens: toolModel.maxContext - 500 // context budget for history filtering; excludes the response max tokens
});
/* Run llm */
const ai = getAIApi({
timeout: 480000
});
const aiResponse = await ai.chat.completions.create(
{
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: 0,
stream,
messages: filterMessages,
functions,
function_call: 'auto'
},
{
headers: {
Accept: 'application/json, text/plain, */*'
}
}
);
const { answer, functionCalls } = await (async () => {
if (stream) {
return streamResponse({
res,
detail,
toolModules,
stream: aiResponse
});
} else {
const result = aiResponse as ChatCompletion;
const function_call = result.choices?.[0]?.message?.function_call;
const toolModule = toolModules.find((module) => module.moduleId === function_call?.name);
const toolCalls = function_call
? [
{
...function_call,
id: getNanoid(),
toolName: toolModule?.name,
toolAvatar: toolModule?.avatar
}
]
: [];
return {
answer: result.choices?.[0]?.message?.content || '',
functionCalls: toolCalls
};
}
})();
// Run the selected tool.
const toolsRunResponse = (
await Promise.all(
functionCalls.map(async (tool) => {
if (!tool) return;
const toolModule = toolModules.find((module) => module.moduleId === tool.name);
if (!toolModule) return;
const startParams = (() => {
try {
return json5.parse(tool.arguments);
} catch (error) {
return {};
}
})();
const moduleRunResponse = await dispatchWorkFlowV1({
...props,
runtimeModules: runtimeModules.map((module) => ({
...module,
isEntry: module.moduleId === toolModule.moduleId
})),
startParams
});
const stringToolResponse = (() => {
if (typeof moduleRunResponse.toolResponses === 'object') {
return JSON.stringify(moduleRunResponse.toolResponses, null, 2);
}
return moduleRunResponse.toolResponses ? String(moduleRunResponse.toolResponses) : 'none';
})();
const functionCallMsg: ChatCompletionFunctionMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Function,
name: tool.name,
content: stringToolResponse
};
if (stream && detail) {
responseWrite({
res,
event: SseResponseEventEnum.toolResponse,
data: JSON.stringify({
tool: {
id: tool.id,
toolName: '',
toolAvatar: '',
params: '',
response: stringToolResponse
}
})
});
}
return {
moduleRunResponse,
functionCallMsg
};
})
)
).filter(Boolean) as FunctionRunResponseType;
const flatToolsResponseData = toolsRunResponse.map((item) => item.moduleRunResponse).flat();
const functionCall = functionCalls[0];
if (functionCall && !res.closed) {
// Run the tool, combine its results, and perform another round of AI calls
const assistantToolMsgParams: ChatCompletionAssistantMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
function_call: functionCall
};
const concatToolMessages = [
...filterMessages,
assistantToolMsgParams
] as ChatCompletionMessageParam[];
const tokens = await countGptMessagesTokens(concatToolMessages, undefined, functions);
const completeMessages = [
...concatToolMessages,
...toolsRunResponse.map((item) => item?.functionCallMsg)
];
// console.log(tokens, 'tool');
if (stream && detail) {
responseWriteNodeStatus({
res,
name: module.name
});
}
// tool assistant
const toolAssistants = toolsRunResponse
.map((item) => {
const assistantResponses = item.moduleRunResponse.assistantResponses || [];
return assistantResponses;
})
.flat();
// tool node assistant
const adaptChatMessages = GPTMessages2Chats(completeMessages);
const toolNodeAssistant = adaptChatMessages.pop() as AIChatItemType;
const toolNodeAssistants = [
...assistantResponses,
...toolAssistants,
...toolNodeAssistant.value
];
// concat tool responses
const dispatchFlowResponse = response
? response.dispatchFlowResponse.concat(flatToolsResponseData)
: flatToolsResponseData;
/* check stop signal */
const hasStopSignal = flatToolsResponseData.some(
(item) => !!item.flowResponses?.find((item) => item.toolStop)
);
if (hasStopSignal) {
return {
dispatchFlowResponse,
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
completeMessages: filterMessages,
assistantResponses: toolNodeAssistants
};
}
return runToolWithFunctionCall(
{
...props,
messages: completeMessages
},
{
dispatchFlowResponse,
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
assistantResponses: toolNodeAssistants
}
);
} else {
// No tool is invoked, indicating that the process is over
const gptAssistantResponse: ChatCompletionAssistantMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: answer
};
const completeMessages = filterMessages.concat(gptAssistantResponse);
const tokens = await countGptMessagesTokens(completeMessages, undefined, functions);
// console.log(tokens, 'response token');
// concat tool assistant
const toolNodeAssistant = GPTMessages2Chats([gptAssistantResponse])[0] as AIChatItemType;
return {
dispatchFlowResponse: response?.dispatchFlowResponse || [],
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
completeMessages,
assistantResponses: [...assistantResponses, ...toolNodeAssistant.value]
};
}
};
async function streamResponse({
res,
detail,
toolModules,
stream
}: {
res: NextApiResponse;
detail: boolean;
toolModules: ToolModuleItemType[];
stream: StreamChatType;
}) {
const write = responseWriteController({
res,
readStream: stream
});
let textAnswer = '';
let functionCalls: ChatCompletionMessageFunctionCall[] = [];
let functionId = getNanoid();
for await (const part of stream) {
if (res.closed) {
stream.controller?.abort();
break;
}
const responseChoice = part.choices?.[0]?.delta;
if (responseChoice.content) {
const content = responseChoice?.content || '';
textAnswer += content;
responseWrite({
write,
event: detail ? SseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({
text: content
})
});
} else if (responseChoice.function_call) {
const functionCall: {
arguments: string;
name?: string;
} = responseChoice.function_call;
// In a stream response, only one function comes back at a time; a present name marks the start of a function call
if (functionCall?.name) {
functionId = getNanoid();
const toolModule = toolModules.find((module) => module.moduleId === functionCall?.name);
if (toolModule) {
if (functionCall?.arguments === undefined) {
functionCall.arguments = '';
}
functionCalls.push({
...functionCall,
id: functionId,
name: functionCall.name,
toolName: toolModule.name,
toolAvatar: toolModule.avatar
});
if (detail) {
responseWrite({
write,
event: SseResponseEventEnum.toolCall,
data: JSON.stringify({
tool: {
id: functionId,
toolName: toolModule.name,
toolAvatar: toolModule.avatar,
functionName: functionCall.name,
params: functionCall.arguments,
response: ''
}
})
});
}
}
}
/* Append the argument fragment to the last function call's arguments */
const arg: string = functionCall?.arguments || '';
const currentTool = functionCalls[functionCalls.length - 1];
if (currentTool) {
currentTool.arguments += arg;
if (detail) {
responseWrite({
write,
event: SseResponseEventEnum.toolParams,
data: JSON.stringify({
tool: {
id: functionId,
toolName: '',
toolAvatar: '',
params: arg,
response: ''
}
})
});
}
}
}
}
if (!textAnswer && functionCalls.length === 0) {
return Promise.reject('LLM api response empty');
}
return { answer: textAnswer, functionCalls };
}
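// Illustrative sketch of the delta stitching above: for streamed chunks
//   { function_call: { name: 'abc', arguments: '' } },
//   { function_call: { arguments: '{"ci' } },
//   { function_call: { arguments: 'ty":"杭州"}' } }
// a single entry lands in functionCalls with arguments === '{"city":"杭州"}',
// ready for json5.parse in the tool-run step.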

View File

@@ -0,0 +1,158 @@
// @ts-nocheck
import { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type {
DispatchNodeResultType,
RuntimeNodeItemType
} from '@fastgpt/global/core/workflow/runtime/type';
import { ModelTypeEnum, getLLMModel } from '../../../../ai/model';
import { getHistories } from '../../utils';
import { runToolWithToolChoice } from './toolChoice';
import { DispatchToolModuleProps, ToolModuleItemType } from './type.d';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import {
GPTMessages2Chats,
chats2GPTMessages,
getSystemPrompt,
runtimePrompt2ChatsValue
} from '@fastgpt/global/core/chat/adapt';
import { formatModelChars2Points } from '../../../../../support/wallet/usage/utils';
import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
import { runToolWithFunctionCall } from './functionCall';
import { runToolWithPromptCall } from './promptCall';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { Prompt_Tool_Call } from './constants';
type Response = DispatchNodeResultType<{}>;
export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
const {
module: { name, outputs },
runtimeModules,
histories,
params: { model, systemPrompt, userChatInput, history = 6 }
} = props;
const toolModel = getLLMModel(model);
const chatHistories = getHistories(history, histories);
/* get tool params */
// get tool output targets
const toolOutput = outputs.find((output) => output.key === NodeOutputKeyEnum.selectedTools);
if (!toolOutput) {
return Promise.reject('No tool output found');
}
const targets = toolOutput.targets;
// Gets the module to which the tool is connected
const toolModules = targets
.map((item) => {
const tool = runtimeModules.find((module) => module.moduleId === item.moduleId);
return tool;
})
.filter(Boolean)
.map<ToolModuleItemType>((tool) => {
const toolParams = tool?.inputs.filter((input) => !!input.toolDescription) || [];
return {
...(tool as RuntimeNodeItemType),
toolParams
};
});
const messages: ChatItemType[] = [
...getSystemPrompt(systemPrompt),
...chatHistories,
{
obj: ChatRoleEnum.Human,
value: runtimePrompt2ChatsValue({
text: userChatInput,
files: []
})
}
];
const {
dispatchFlowResponse, // tool flow response
totalTokens,
completeMessages = [], // The actual message sent to AI(just save text)
assistantResponses = [] // FastGPT system store assistant.value response
} = await (async () => {
const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
if (toolModel.toolChoice) {
return runToolWithToolChoice({
...props,
toolModules,
toolModel,
messages: adaptMessages
});
}
if (toolModel.functionCall) {
return runToolWithFunctionCall({
...props,
toolModules,
toolModel,
messages: adaptMessages
});
}
const lastMessage = adaptMessages[adaptMessages.length - 1];
if (typeof lastMessage.content !== 'string') {
return Promise.reject('暂时只支持纯文本');
}
lastMessage.content = replaceVariable(Prompt_Tool_Call, {
question: userChatInput
});
return runToolWithPromptCall({
...props,
toolModules,
toolModel,
messages: adaptMessages
});
})();
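// The runner is picked by model capability: native tool_choice first, then the legacy
// function_call API, and finally the prompt-based protocol (Prompt_Tool_Call) for
// models that support neither.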
const { totalPoints, modelName } = formatModelChars2Points({
model,
tokens: totalTokens,
modelType: ModelTypeEnum.llm
});
// flat child tool response
const childToolResponse = dispatchFlowResponse.map((item) => item.flowResponses).flat();
// concat tool usage
const totalPointsUsage =
totalPoints +
dispatchFlowResponse.reduce((sum, item) => {
const childrenTotal = item.flowUsages.reduce((sum, item) => sum + item.totalPoints, 0);
return sum + childrenTotal;
}, 0);
const flatUsages = dispatchFlowResponse.map((item) => item.flowUsages).flat();
return {
[DispatchNodeResponseKeyEnum.assistantResponses]: assistantResponses,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: totalPointsUsage,
toolCallTokens: totalTokens,
model: modelName,
query: userChatInput,
historyPreview: getHistoryPreview(GPTMessages2Chats(completeMessages, false)),
toolDetail: childToolResponse
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: name,
totalPoints,
model: modelName,
tokens: totalTokens
},
...flatUsages
]
};
};

View File

@@ -0,0 +1,388 @@
// @ts-nocheck
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getAIApi } from '../../../../ai/config';
import { filterGPTMessageByMaxTokens } from '../../../../chat/utils';
import {
ChatCompletion,
StreamChatType,
ChatCompletionMessageParam,
ChatCompletionAssistantMessageParam
} from '@fastgpt/global/core/ai/type';
import { NextApiResponse } from 'next';
import {
responseWrite,
responseWriteController,
responseWriteNodeStatus
} from '../../../../../common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { dispatchWorkFlowV1 } from '../../index';
import { DispatchToolModuleProps, RunToolResponse, ToolModuleItemType } from './type.d';
import json5 from 'json5';
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken';
import { getNanoid, replaceVariable } from '@fastgpt/global/common/string/tools';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
type FunctionCallCompletion = {
id: string;
name: string;
arguments: string;
toolName?: string;
toolAvatar?: string;
};
export const runToolWithPromptCall = async (
props: DispatchToolModuleProps & {
messages: ChatCompletionMessageParam[];
toolModules: ToolModuleItemType[];
toolModel: LLMModelItemType;
},
response?: RunToolResponse
): Promise<RunToolResponse> => {
const {
toolModel,
toolModules,
messages,
res,
runtimeModules,
detail = false,
module,
stream
} = props;
const assistantResponses = response?.assistantResponses || [];
const toolsPrompt = JSON.stringify(
toolModules.map((module) => {
const properties: Record<
string,
{
type: string;
description: string;
required?: boolean;
}
> = {};
module.toolParams.forEach((item) => {
properties[item.key] = {
type: 'string',
description: item.toolDescription || ''
};
});
return {
toolId: module.moduleId,
description: module.intro,
parameters: {
type: 'object',
properties,
required: module.toolParams.filter((item) => item.required).map((item) => item.key)
}
};
})
);
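// Illustrative sketch: each connected module serializes to
//   {"toolId":"<moduleId>","description":"<intro>","parameters":{"type":"object","properties":{...},"required":[...]}}
// and the resulting JSON array replaces {{toolsPrompt}} in Prompt_Tool_Call via the
// substitution below.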
const lastMessage = messages[messages.length - 1];
if (typeof lastMessage.content !== 'string') {
return Promise.reject('暂时只支持纯文本');
}
lastMessage.content = replaceVariable(lastMessage.content, {
toolsPrompt
});
const filterMessages = await filterGPTMessageByMaxTokens({
messages,
maxTokens: toolModel.maxContext - 500 // context budget for history filtering; excludes the response max tokens
});
// console.log(JSON.stringify(filterMessages, null, 2));
/* Run llm */
const ai = getAIApi({
timeout: 480000
});
const aiResponse = await ai.chat.completions.create(
{
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: 0,
stream,
messages: filterMessages
},
{
headers: {
Accept: 'application/json, text/plain, */*'
}
}
);
const answer = await (async () => {
if (stream) {
const { answer } = await streamResponse({
res,
detail,
toolModules,
stream: aiResponse
});
return answer;
} else {
const result = aiResponse as ChatCompletion;
return result.choices?.[0]?.message?.content || '';
}
})();
const parseAnswerResult = parseAnswer(answer);
// console.log(parseAnswer, '==11==');
// No tools
if (typeof parseAnswerResult === 'string') {
// No tool is invoked, indicating that the process is over
const gptAssistantResponse: ChatCompletionAssistantMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: parseAnswerResult
};
const completeMessages = filterMessages.concat(gptAssistantResponse);
const tokens = await countGptMessagesTokens(completeMessages, undefined);
// console.log(tokens, 'response token');
// concat tool assistant
const toolNodeAssistant = GPTMessages2Chats([gptAssistantResponse])[0] as AIChatItemType;
return {
dispatchFlowResponse: response?.dispatchFlowResponse || [],
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
completeMessages,
assistantResponses: [...assistantResponses, ...toolNodeAssistant.value]
};
}
// Run the selected tool.
const toolsRunResponse = await (async () => {
if (!parseAnswerResult) return Promise.reject('tool run error');
const toolModule = toolModules.find((module) => module.moduleId === parseAnswerResult.name);
if (!toolModule) return Promise.reject('tool not found');
parseAnswerResult.toolName = toolModule.name;
parseAnswerResult.toolAvatar = toolModule.avatar;
// run tool flow
const startParams = (() => {
try {
return json5.parse(parseAnswerResult.arguments);
} catch (error) {
return {};
}
})();
// SSE response to client
if (stream && detail) {
responseWrite({
res,
event: SseResponseEventEnum.toolCall,
data: JSON.stringify({
tool: {
id: parseAnswerResult.id,
toolName: toolModule.name,
toolAvatar: toolModule.avatar,
functionName: parseAnswerResult.name,
params: parseAnswerResult.arguments,
response: ''
}
})
});
}
const moduleRunResponse = await dispatchWorkFlowV1({
...props,
runtimeModules: runtimeModules.map((module) => ({
...module,
isEntry: module.moduleId === toolModule.moduleId
})),
startParams
});
const stringToolResponse = (() => {
if (typeof moduleRunResponse.toolResponses === 'object') {
return JSON.stringify(moduleRunResponse.toolResponses, null, 2);
}
return moduleRunResponse.toolResponses ? String(moduleRunResponse.toolResponses) : 'none';
})();
if (stream && detail) {
responseWrite({
res,
event: SseResponseEventEnum.toolResponse,
data: JSON.stringify({
tool: {
id: parseAnswerResult.id,
toolName: '',
toolAvatar: '',
params: '',
response: stringToolResponse
}
})
});
}
return {
moduleRunResponse,
toolResponsePrompt: stringToolResponse
};
})();
if (stream && detail) {
responseWriteNodeStatus({
res,
name: module.name
});
}
// Merge the tool-call result, stored in function_call format.
const assistantToolMsgParams: ChatCompletionAssistantMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
function_call: parseAnswerResult
};
const concatToolMessages = [
...filterMessages,
assistantToolMsgParams
] as ChatCompletionMessageParam[];
const tokens = await countGptMessagesTokens(concatToolMessages, undefined);
const completeMessages: ChatCompletionMessageParam[] = [
...concatToolMessages,
{
role: ChatCompletionRequestMessageRoleEnum.Function,
name: parseAnswerResult.name,
content: toolsRunResponse.toolResponsePrompt
}
];
// tool assistant
const toolAssistants = toolsRunResponse.moduleRunResponse.assistantResponses || [];
// tool node assistant
const adaptChatMessages = GPTMessages2Chats(completeMessages);
const toolNodeAssistant = adaptChatMessages.pop() as AIChatItemType;
const toolNodeAssistants = [...assistantResponses, ...toolAssistants, ...toolNodeAssistant.value];
const dispatchFlowResponse = response
? response.dispatchFlowResponse.concat(toolsRunResponse.moduleRunResponse)
: [toolsRunResponse.moduleRunResponse];
// get the next user prompt
lastMessage.content += `${answer}
TOOL_RESPONSE: """
${toolsRunResponse.toolResponsePrompt}
"""
ANSWER: `;
/* check stop signal */
const hasStopSignal = toolsRunResponse.moduleRunResponse.flowResponses.some(
(item) => !!item.toolStop
);
if (hasStopSignal) {
return {
dispatchFlowResponse,
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
completeMessages: filterMessages,
assistantResponses: toolNodeAssistants
};
}
return runToolWithPromptCall(
{
...props,
messages
},
{
dispatchFlowResponse,
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
assistantResponses: toolNodeAssistants
}
);
};
async function streamResponse({
res,
detail,
stream
}: {
res: NextApiResponse;
detail: boolean;
toolModules: ToolModuleItemType[];
stream: StreamChatType;
}) {
const write = responseWriteController({
res,
readStream: stream
});
let startResponseWrite = false;
let textAnswer = '';
for await (const part of stream) {
if (res.closed) {
stream.controller?.abort();
break;
}
const responseChoice = part.choices?.[0]?.delta;
if (responseChoice.content) {
const content = responseChoice?.content || '';
textAnswer += content;
if (startResponseWrite) {
responseWrite({
write,
event: detail ? SseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({
text: content
})
});
} else if (textAnswer.length >= 3) {
textAnswer = textAnswer.trim();
if (textAnswer.startsWith('0')) {
startResponseWrite = true;
// find first : index
const firstIndex = textAnswer.indexOf(':');
textAnswer = textAnswer.substring(firstIndex + 1).trim();
responseWrite({
write,
event: detail ? SseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({
text: textAnswer
})
});
}
}
}
}
if (!textAnswer) {
return Promise.reject('LLM api response empty');
}
// console.log(textAnswer, '---===');
return { answer: textAnswer.trim() };
}
const parseAnswer = (str: string): FunctionCallCompletion | string => {
// Answers prefixed with "1:" carry a JSON5 tool payload; extract the tool id and arguments from it
const prefix = '1:';
str = str.trim();
if (str.startsWith(prefix)) {
const toolString = str.substring(prefix.length).trim();
try {
const toolCall = json5.parse(toolString);
return {
id: getNanoid(),
name: toolCall.toolId,
arguments: JSON.stringify(toolCall.arguments || toolCall.parameters)
};
} catch (error) {
return str;
}
} else {
return str;
}
};
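// Illustrative sketch of parseAnswer on the two protocol shapes:
//   parseAnswer('0: 今天杭州是晴天。') → the string itself (no tool call, the flow ends);
//   parseAnswer(`1: {"toolId":"abc",arguments:{"city":"杭州"}}`)
//     → { id: <nanoid>, name: 'abc', arguments: '{"city":"杭州"}' }
// (json5 tolerates the unquoted `arguments` key shown in the prompt examples).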

View File

@@ -0,0 +1,14 @@
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
export type AnswerProps = ModuleDispatchProps<{}>;
export type AnswerResponse = DispatchNodeResultType<{}>;
export const dispatchStopToolCall = (props: Record<string, any>): AnswerResponse => {
return {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
toolStop: true
}
};
};

View File

@@ -0,0 +1,413 @@
// @ts-nocheck
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getAIApi } from '../../../../ai/config';
import { filterGPTMessageByMaxTokens } from '../../../../chat/utils';
import {
ChatCompletion,
ChatCompletionMessageToolCall,
StreamChatType,
ChatCompletionToolMessageParam,
ChatCompletionAssistantToolParam,
ChatCompletionMessageParam,
ChatCompletionTool,
ChatCompletionAssistantMessageParam
} from '@fastgpt/global/core/ai/type';
import { NextApiResponse } from 'next';
import {
responseWrite,
responseWriteController,
responseWriteNodeStatus
} from '../../../../../common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { dispatchWorkFlowV1 } from '../../index';
import { DispatchToolModuleProps, RunToolResponse, ToolModuleItemType } from './type.d';
import json5 from 'json5';
import { DispatchFlowResponse } from '../../type';
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
type ToolRunResponseType = {
moduleRunResponse: DispatchFlowResponse;
toolMsgParams: ChatCompletionToolMessageParam;
}[];
/*
Invocation approach:
1. `messages` holds the messages sent to the AI.
2. `response` accumulates the results of the recursive runs (dispatchFlowResponse, totalTokens and assistantResponses).
3. When a tool runs, its results are appended to dispatchFlowResponse, the tokens consumed in this round are added to totalTokens, and assistantResponses records the output of the current tool run.
*/
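// Illustrative sketch: two recursive rounds accumulate as
//   round 1 (one tool ran, 1200 tokens)  → { totalTokens: 1200, dispatchFlowResponse: [r1], assistantResponses: [...] }
//   round 2 (no tool called, 800 tokens) → { totalTokens: 2000, dispatchFlowResponse: [r1], completeMessages }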
export const runToolWithToolChoice = async (
props: DispatchToolModuleProps & {
messages: ChatCompletionMessageParam[];
toolModules: ToolModuleItemType[];
toolModel: LLMModelItemType;
},
response?: RunToolResponse
): Promise<RunToolResponse> => {
const {
toolModel,
toolModules,
messages,
res,
runtimeModules,
detail = false,
module,
stream
} = props;
const assistantResponses = response?.assistantResponses || [];
const tools: ChatCompletionTool[] = toolModules.map((module) => {
const properties: Record<
string,
{
type: string;
description: string;
required?: boolean;
}
> = {};
module.toolParams.forEach((item) => {
properties[item.key] = {
type: 'string',
description: item.toolDescription || ''
};
});
return {
type: 'function',
function: {
name: module.moduleId,
description: module.intro,
parameters: {
type: 'object',
properties,
required: module.toolParams.filter((item) => item.required).map((item) => item.key)
}
}
};
});
const filterMessages = await filterGPTMessageByMaxTokens({
messages,
maxTokens: toolModel.maxContext - 300 // context budget for history filtering; excludes the response max tokens
});
/* Run llm */
const ai = getAIApi({
timeout: 480000
});
const aiResponse = await ai.chat.completions.create(
{
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: 0,
stream,
messages: filterMessages,
tools,
tool_choice: 'auto'
},
{
headers: {
Accept: 'application/json, text/plain, */*'
}
}
);
const { answer, toolCalls } = await (async () => {
if (stream) {
return streamResponse({
res,
detail,
toolModules,
stream: aiResponse
});
} else {
const result = aiResponse as ChatCompletion;
const calls = result.choices?.[0]?.message?.tool_calls || [];
// Attach each tool's display name and avatar
const toolCalls = calls.map((tool) => {
const toolModule = toolModules.find((module) => module.moduleId === tool.function?.name);
return {
...tool,
toolName: toolModule?.name || '',
toolAvatar: toolModule?.avatar || ''
};
});
return {
answer: result.choices?.[0]?.message?.content || '',
toolCalls: toolCalls
};
}
})();
// Run the selected tool.
const toolsRunResponse = (
await Promise.all(
toolCalls.map(async (tool) => {
const toolModule = toolModules.find((module) => module.moduleId === tool.function?.name);
if (!toolModule) return;
const startParams = (() => {
try {
return json5.parse(tool.function.arguments);
} catch (error) {
return {};
}
})();
const moduleRunResponse = await dispatchWorkFlowV1({
...props,
runtimeModules: runtimeModules.map((module) => ({
...module,
isEntry: module.moduleId === toolModule.moduleId
})),
startParams
});
const stringToolResponse = (() => {
if (typeof moduleRunResponse.toolResponses === 'object') {
return JSON.stringify(moduleRunResponse.toolResponses, null, 2);
}
return moduleRunResponse.toolResponses ? String(moduleRunResponse.toolResponses) : 'none';
})();
const toolMsgParams: ChatCompletionToolMessageParam = {
tool_call_id: tool.id,
role: ChatCompletionRequestMessageRoleEnum.Tool,
name: tool.function.name,
content: stringToolResponse
};
if (stream && detail) {
responseWrite({
res,
event: SseResponseEventEnum.toolResponse,
data: JSON.stringify({
tool: {
id: tool.id,
toolName: '',
toolAvatar: '',
params: '',
response: stringToolResponse
}
})
});
}
return {
moduleRunResponse,
toolMsgParams
};
})
)
).filter(Boolean) as ToolRunResponseType;
const flatToolsResponseData = toolsRunResponse.map((item) => item.moduleRunResponse).flat();
if (toolCalls.length > 0 && !res.closed) {
// Run the tool, combine its results, and perform another round of AI calls
const assistantToolMsgParams: ChatCompletionAssistantToolParam = {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
tool_calls: toolCalls
};
const concatToolMessages = [
...filterMessages,
assistantToolMsgParams
] as ChatCompletionMessageParam[];
const tokens = await countGptMessagesTokens(concatToolMessages, tools);
const completeMessages = [
...concatToolMessages,
...toolsRunResponse.map((item) => item?.toolMsgParams)
];
// console.log(tokens, 'tool');
if (stream && detail) {
responseWriteNodeStatus({
res,
name: module.name
});
}
// tool assistant
const toolAssistants = toolsRunResponse
.map((item) => {
const assistantResponses = item.moduleRunResponse.assistantResponses || [];
return assistantResponses;
})
.flat();
// tool node assistant
const adaptChatMessages = GPTMessages2Chats(completeMessages);
const toolNodeAssistant = adaptChatMessages.pop() as AIChatItemType;
const toolNodeAssistants = [
...assistantResponses,
...toolAssistants,
...toolNodeAssistant.value
];
// concat tool responses
const dispatchFlowResponse = response
? response.dispatchFlowResponse.concat(flatToolsResponseData)
: flatToolsResponseData;
/* check stop signal */
const hasStopSignal = flatToolsResponseData.some(
(item) => !!item.flowResponses?.find((item) => item.toolStop)
);
if (hasStopSignal) {
return {
dispatchFlowResponse,
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
completeMessages,
assistantResponses: toolNodeAssistants
};
}
return runToolWithToolChoice(
{
...props,
messages: completeMessages
},
{
dispatchFlowResponse,
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
assistantResponses: toolNodeAssistants
}
);
} else {
// No tool is invoked, indicating that the process is over
const gptAssistantResponse: ChatCompletionAssistantMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: answer
};
const completeMessages = filterMessages.concat(gptAssistantResponse);
const tokens = await countGptMessagesTokens(completeMessages, tools);
// console.log(tokens, 'response token');
// concat tool assistant
const toolNodeAssistant = GPTMessages2Chats([gptAssistantResponse])[0] as AIChatItemType;
return {
dispatchFlowResponse: response?.dispatchFlowResponse || [],
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
completeMessages,
assistantResponses: [...assistantResponses, ...toolNodeAssistant.value]
};
}
};
async function streamResponse({
res,
detail,
toolModules,
stream
}: {
res: NextApiResponse;
detail: boolean;
toolModules: ToolModuleItemType[];
stream: StreamChatType;
}) {
const write = responseWriteController({
res,
readStream: stream
});
let textAnswer = '';
let toolCalls: ChatCompletionMessageToolCall[] = [];
for await (const part of stream) {
if (res.closed) {
stream.controller?.abort();
break;
}
const responseChoice = part.choices?.[0]?.delta;
// console.log(JSON.stringify(responseChoice, null, 2));
if (responseChoice?.content) {
const content = responseChoice.content || '';
textAnswer += content;
responseWrite({
write,
event: detail ? SseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({
text: content
})
});
} else if (responseChoice?.tool_calls?.[0]) {
const toolCall: ChatCompletionMessageToolCall = responseChoice.tool_calls[0];
// In a stream response, only one tool comes back at a time; a present id marks the start of a tool call
if (toolCall.id) {
const toolModule = toolModules.find(
(module) => module.moduleId === toolCall.function?.name
);
if (toolModule) {
if (toolCall.function?.arguments === undefined) {
toolCall.function.arguments = '';
}
toolCalls.push({
...toolCall,
toolName: toolModule.name,
toolAvatar: toolModule.avatar
});
if (detail) {
responseWrite({
write,
event: SseResponseEventEnum.toolCall,
data: JSON.stringify({
tool: {
id: toolCall.id,
toolName: toolModule.name,
toolAvatar: toolModule.avatar,
functionName: toolCall.function.name,
params: toolCall.function.arguments,
response: ''
}
})
});
}
}
}
/* Append the argument fragment to the last tool call's arguments */
const arg: string = responseChoice.tool_calls?.[0]?.function?.arguments || '';
const currentTool = toolCalls[toolCalls.length - 1];
if (currentTool) {
currentTool.function.arguments += arg;
if (detail) {
responseWrite({
write,
event: SseResponseEventEnum.toolParams,
data: JSON.stringify({
tool: {
id: currentTool.id,
toolName: '',
toolAvatar: '',
params: arg,
response: ''
}
})
});
}
}
}
}
if (!textAnswer && toolCalls.length === 0) {
return Promise.reject('LLM api response empty');
}
return { answer: textAnswer, toolCalls };
}

View File

@@ -0,0 +1,28 @@
import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { FlowNodeInputItemType } from '@fastgpt/global/core/workflow/node/type';
import type {
ModuleDispatchProps,
DispatchNodeResponseType
} from '@fastgpt/global/core/workflow/type.d';
import type { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import type { DispatchFlowResponse } from '../../type.d';
import { AIChatItemValueItemType, ChatItemValueItemType } from '@fastgpt/global/core/chat/type';
export type DispatchToolModuleProps = ModuleDispatchProps<{
[NodeInputKeyEnum.history]?: ChatItemType[];
[NodeInputKeyEnum.aiModel]: string;
[NodeInputKeyEnum.aiSystemPrompt]: string;
[NodeInputKeyEnum.userChatInput]: string;
}>;
export type RunToolResponse = {
dispatchFlowResponse: DispatchFlowResponse[];
totalTokens: number;
completeMessages?: ChatCompletionMessageParam[];
assistantResponses?: AIChatItemValueItemType[];
};
export type ToolModuleItemType = RuntimeNodeItemType & {
toolParams: RuntimeNodeItemType['inputs'];
};

View File

@@ -0,0 +1,408 @@
// @ts-nocheck
import type { NextApiResponse } from 'next';
import {
filterGPTMessageByMaxTokens,
formatGPTMessagesInRequestBefore,
loadChatImgToBase64
} from '../../../chat/utils';
import type { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type.d';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { getAIApi } from '../../../ai/config';
import type {
ChatCompletion,
ChatCompletionMessageParam,
StreamChatType
} from '@fastgpt/global/core/ai/type.d';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { postTextCensor } from '../../../../common/api/requestPlusApi';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import type { FlowNodeItemType } from '@fastgpt/global/core/workflow/type';
import type { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import {
countMessagesTokens,
countGptMessagesTokens
} from '../../../../common/string/tiktoken/index';
import {
chats2GPTMessages,
getSystemPrompt,
GPTMessages2Chats,
runtimePrompt2ChatsValue
} from '@fastgpt/global/core/chat/adapt';
import {
Prompt_QuotePromptList,
Prompt_QuoteTemplateList
} from '@fastgpt/global/core/ai/prompt/AIChat';
import type { AIChatNodeProps } from '@fastgpt/global/core/workflow/runtime/type.d';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type';
import { responseWrite, responseWriteController } from '../../../../common/response';
import { getLLMModel, ModelTypeEnum } from '../../../ai/model';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { getHistories } from '../utils';
import { filterSearchResultsByMaxChars } from '../../utils';
import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
export type ChatProps = ModuleDispatchProps<
AIChatNodeProps & {
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.history]?: ChatItemType[] | number;
[NodeInputKeyEnum.aiChatDatasetQuote]?: SearchDataResponseItemType[];
}
>;
export type ChatResponse = DispatchNodeResultType<{
[NodeOutputKeyEnum.answerText]: string;
[NodeOutputKeyEnum.history]: ChatItemType[];
}>;
/* request openai chat */
export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResponse> => {
let {
res,
stream = false,
detail = false,
user,
histories,
module: { name, outputs },
inputFiles = [],
params: {
model,
temperature = 0,
maxToken = 4000,
history = 6,
quoteQA,
userChatInput,
isResponseAnswerText = true,
systemPrompt = '',
quoteTemplate,
quotePrompt
}
} = props;
if (!userChatInput && inputFiles.length === 0) {
return Promise.reject('Question is empty');
}
stream = stream && isResponseAnswerText;
const chatHistories = getHistories(history, histories);
// temperature adapt
const modelConstantsData = getLLMModel(model);
if (!modelConstantsData) {
return Promise.reject('The chat model is undefined, you need to select a chat model.');
}
const { quoteText } = await filterQuote({
quoteQA,
model: modelConstantsData,
quoteTemplate
});
// censor model and system key
if (modelConstantsData.censor && !user.openaiAccount?.key) {
await postTextCensor({
text: `${systemPrompt}
${quoteText}
${userChatInput}
`
});
}
const { filterMessages } = await getChatMessages({
model: modelConstantsData,
histories: chatHistories,
quoteQA,
quoteText,
quotePrompt,
userChatInput,
inputFiles,
systemPrompt
});
const { max_tokens } = await getMaxTokens({
model: modelConstantsData,
maxToken,
filterMessages
});
// FastGPT temperature range: 1~10
temperature = +(modelConstantsData.maxTemperature * (temperature / 10)).toFixed(2);
temperature = Math.max(temperature, 0.01);
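// e.g. with maxTemperature = 2 and a FastGPT temperature of 7: +(2 * (7 / 10)).toFixed(2) = 1.4,
// and a user setting of 0 is floored to 0.01.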
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const concatMessages = [
...(modelConstantsData.defaultSystemChatPrompt
? [
{
role: ChatCompletionRequestMessageRoleEnum.System,
content: modelConstantsData.defaultSystemChatPrompt
}
]
: []),
...formatGPTMessagesInRequestBefore(filterMessages)
] as ChatCompletionMessageParam[];
if (concatMessages.length === 0) {
return Promise.reject('core.chat.error.Messages empty');
}
const loadMessages = await Promise.all(
concatMessages.map(async (item) => {
if (item.role === ChatCompletionRequestMessageRoleEnum.User) {
return {
...item,
content: await loadChatImgToBase64(item.content)
};
} else {
return item;
}
})
);
const response = await ai.chat.completions.create(
{
...modelConstantsData?.defaultConfig,
model: modelConstantsData.model,
temperature,
max_tokens,
stream,
messages: loadMessages
},
{
headers: {
Accept: 'application/json, text/plain, */*'
}
}
);
const { answerText } = await (async () => {
if (res && stream) {
// sse response
const { answer } = await streamResponse({
res,
detail,
stream: response
});
targetResponse({ res, detail, outputs });
return {
answerText: answer
};
} else {
const unStreamResponse = response as ChatCompletion;
const answer = unStreamResponse.choices?.[0]?.message?.content || '';
return {
answerText: answer
};
}
})();
const completeMessages = filterMessages.concat({
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: answerText
});
const chatCompleteMessages = GPTMessages2Chats(completeMessages);
const tokens = await countMessagesTokens(chatCompleteMessages);
const { totalPoints, modelName } = formatModelChars2Points({
model,
tokens,
modelType: ModelTypeEnum.llm
});
return {
answerText,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
tokens,
query: `${userChatInput}`,
maxToken: max_tokens,
historyPreview: getHistoryPreview(chatCompleteMessages),
contextTotalLen: completeMessages.length
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: name,
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
tokens
}
],
[DispatchNodeResponseKeyEnum.toolResponses]: answerText,
history: chatCompleteMessages
};
};
async function filterQuote({
quoteQA = [],
model,
quoteTemplate
}: {
quoteQA: ChatProps['params']['quoteQA'];
model: LLMModelItemType;
quoteTemplate?: string;
}) {
function getValue(item: SearchDataResponseItemType, index: number) {
return replaceVariable(quoteTemplate || Prompt_QuoteTemplateList[0].value, {
q: item.q,
a: item.a,
source: item.sourceName,
sourceId: String(item.sourceId || 'Unknown'),
index: index + 1
});
}
// slice filterSearch
const filterQuoteQA = await filterSearchResultsByMaxChars(quoteQA, model.quoteMaxToken);
const quoteText =
filterQuoteQA.length > 0
? `${filterQuoteQA.map((item, index) => getValue(item, index).trim()).join('\n------\n')}`
: '';
return {
quoteText
};
}
async function getChatMessages({
quotePrompt,
quoteText,
quoteQA,
histories = [],
systemPrompt,
userChatInput,
inputFiles,
model
}: {
quotePrompt?: string;
quoteText: string;
quoteQA: ChatProps['params']['quoteQA'];
histories: ChatItemType[];
systemPrompt: string;
userChatInput: string;
inputFiles: UserChatItemValueItemType['file'][];
model: LLMModelItemType;
}) {
const replaceInputValue =
quoteQA !== undefined
? replaceVariable(quotePrompt || Prompt_QuotePromptList[0].value, {
quote: quoteText,
question: userChatInput
})
: userChatInput;
const messages: ChatItemType[] = [
...getSystemPrompt(systemPrompt),
...histories,
{
obj: ChatRoleEnum.Human,
value: runtimePrompt2ChatsValue({
files: inputFiles,
text: replaceInputValue
})
}
];
const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
const filterMessages = await filterGPTMessageByMaxTokens({
messages: adaptMessages,
maxTokens: model.maxContext - 300 // context budget for history filtering; excludes the response max tokens
});
return {
filterMessages
};
}
async function getMaxTokens({
maxToken,
model,
filterMessages = []
}: {
maxToken: number;
model: LLMModelItemType;
filterMessages: ChatCompletionMessageParam[];
}) {
maxToken = Math.min(maxToken, model.maxResponse);
const tokensLimit = model.maxContext;
/* count response max token */
const promptsToken = await countGptMessagesTokens(filterMessages);
maxToken = promptsToken + maxToken > tokensLimit ? tokensLimit - promptsToken : maxToken;
if (maxToken <= 0) {
maxToken = 200;
}
return {
max_tokens: maxToken
};
}
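// Illustrative sketch: with maxContext = 16000, maxResponse = 4000, maxToken = 4000 and
// promptsToken = 13500, the request would overflow (13500 + 4000 > 16000), so max_tokens
// is clipped to 16000 - 13500 = 2500; a prompt that already fills the context falls back
// to the 200-token floor.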
function targetResponse({
res,
outputs,
detail
}: {
res: NextApiResponse;
outputs: FlowNodeItemType['outputs'];
detail: boolean;
}) {
const targets =
outputs.find((output) => output.key === NodeOutputKeyEnum.answerText)?.targets || [];
if (targets.length === 0) return;
responseWrite({
res,
event: detail ? SseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({
text: '\n'
})
});
}
async function streamResponse({
res,
detail,
stream
}: {
res: NextApiResponse;
detail: boolean;
stream: StreamChatType;
}) {
const write = responseWriteController({
res,
readStream: stream
});
let answer = '';
for await (const part of stream) {
if (res.closed) {
stream.controller?.abort();
break;
}
const content = part.choices?.[0]?.delta?.content || '';
answer += content;
responseWrite({
write,
event: detail ? SseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({
text: content
})
});
}
if (!answer) {
return Promise.reject('core.chat.Chat API is error or undefined');
}
return { answer };
}

View File

@@ -0,0 +1,35 @@
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { datasetSearchResultConcat } from '@fastgpt/global/core/dataset/search/utils';
import { filterSearchResultsByMaxChars } from '../../utils';
type DatasetConcatProps = ModuleDispatchProps<
{
[NodeInputKeyEnum.datasetMaxTokens]: number;
} & { [key: string]: SearchDataResponseItemType[] }
>;
type DatasetConcatResponse = {
[NodeOutputKeyEnum.datasetQuoteQA]: SearchDataResponseItemType[];
};
export async function dispatchDatasetConcat(
props: DatasetConcatProps
): Promise<DatasetConcatResponse> {
const {
params: { limit = 1500, ...quoteMap }
} = props as DatasetConcatProps;
const quoteList = Object.values(quoteMap).filter((list) => Array.isArray(list));
const rrfConcatResults = datasetSearchResultConcat(
quoteList.map((list) => ({
k: 60,
list
}))
);
return {
[NodeOutputKeyEnum.datasetQuoteQA]: await filterSearchResultsByMaxChars(rrfConcatResults, limit)
};
}
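// Note: assuming datasetSearchResultConcat implements standard Reciprocal Rank Fusion,
// each source list contributes 1 / (k + rank) per item with k = 60, and the merged list
// is ordered by the summed score before the final max-chars filter.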

View File

@@ -0,0 +1,165 @@
// @ts-nocheck
import {
DispatchNodeResponseType,
DispatchNodeResultType
} from '@fastgpt/global/core/workflow/runtime/type.d';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import type { SelectedDatasetType } from '@fastgpt/global/core/workflow/api.d';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type';
import { ModelTypeEnum, getLLMModel, getVectorModel } from '../../../ai/model';
import { searchDatasetData } from '../../../dataset/search/controller';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { DatasetSearchModeEnum } from '@fastgpt/global/core/dataset/constants';
import { getHistories } from '../utils';
import { datasetSearchQueryExtension } from '../../../dataset/search/utils';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { checkTeamReRankPermission } from '../../../../support/permission/teamLimit';
type DatasetSearchProps = ModuleDispatchProps<{
[NodeInputKeyEnum.datasetSelectList]: SelectedDatasetType;
[NodeInputKeyEnum.datasetSimilarity]: number;
[NodeInputKeyEnum.datasetMaxTokens]: number;
[NodeInputKeyEnum.datasetSearchMode]: `${DatasetSearchModeEnum}`;
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.datasetSearchUsingReRank]: boolean;
[NodeInputKeyEnum.datasetSearchUsingExtensionQuery]: boolean;
[NodeInputKeyEnum.datasetSearchExtensionModel]: string;
[NodeInputKeyEnum.datasetSearchExtensionBg]: string;
}>;
export type DatasetSearchResponse = DispatchNodeResultType<{
isEmpty?: boolean;
unEmpty?: boolean;
[NodeOutputKeyEnum.datasetQuoteQA]: SearchDataResponseItemType[];
}>;
export async function dispatchDatasetSearch(
props: DatasetSearchProps
): Promise<DatasetSearchResponse> {
const {
teamId,
histories,
module,
params: {
datasets = [],
similarity,
limit = 1500,
usingReRank,
searchMode,
userChatInput,
datasetSearchUsingExtensionQuery,
datasetSearchExtensionModel,
datasetSearchExtensionBg
}
} = props as DatasetSearchProps;
if (!Array.isArray(datasets)) {
return Promise.reject('Quote type error');
}
if (datasets.length === 0) {
return Promise.reject('core.chat.error.Select dataset empty');
}
if (!userChatInput) {
return Promise.reject('core.chat.error.User input empty');
}
// query extension
const extensionModel =
datasetSearchUsingExtensionQuery && datasetSearchExtensionModel
? getLLMModel(datasetSearchExtensionModel)
: undefined;
const { concatQueries, rewriteQuery, aiExtensionResult } = await datasetSearchQueryExtension({
query: userChatInput,
extensionModel,
extensionBg: datasetSearchExtensionBg,
histories: getHistories(6, histories)
});
// console.log(concatQueries, rewriteQuery, aiExtensionResult);
// get vector
const vectorModel = getVectorModel(datasets[0]?.vectorModel?.model);
// start search
const {
searchRes,
tokens,
usingSimilarityFilter,
usingReRank: searchUsingReRank
} = await searchDatasetData({
teamId,
reRankQuery: `${rewriteQuery}`,
queries: concatQueries,
model: vectorModel.model,
similarity,
limit,
datasetIds: datasets.map((item) => item.datasetId),
searchMode,
usingReRank: usingReRank && (await checkTeamReRankPermission(teamId))
});
// count billing results
// vector
const { totalPoints, modelName } = formatModelChars2Points({
model: vectorModel.model,
tokens,
modelType: ModelTypeEnum.vector
});
const responseData: DispatchNodeResponseType & { totalPoints: number } = {
totalPoints,
query: concatQueries.join('\n'),
model: modelName,
tokens,
similarity: usingSimilarityFilter ? similarity : undefined,
limit,
searchMode,
searchUsingReRank: searchUsingReRank,
quoteList: searchRes
};
const nodeDispatchUsages: ChatNodeUsageType[] = [
{
totalPoints,
moduleName: module.name,
model: modelName,
tokens
}
];
if (aiExtensionResult) {
const { totalPoints, modelName } = formatModelChars2Points({
model: aiExtensionResult.model,
tokens: aiExtensionResult.tokens,
modelType: ModelTypeEnum.llm
});
responseData.totalPoints += totalPoints;
responseData.tokens = aiExtensionResult.tokens;
responseData.extensionModel = modelName;
responseData.extensionResult =
aiExtensionResult.extensionQueries?.join('\n') ||
JSON.stringify(aiExtensionResult.extensionQueries);
nodeDispatchUsages.push({
totalPoints,
moduleName: 'core.module.template.Query extension',
model: modelName,
tokens: aiExtensionResult.tokens
});
}
return {
isEmpty: searchRes.length === 0 ? true : undefined,
unEmpty: searchRes.length > 0 ? true : undefined,
quoteQA: searchRes,
[DispatchNodeResponseKeyEnum.nodeResponse]: responseData,
nodeDispatchUsages,
[DispatchNodeResponseKeyEnum.toolResponses]: searchRes.map((item) => ({
id: item.id,
text: `${item.q}\n${item.a}`.trim()
}))
};
}

View File

@@ -0,0 +1,434 @@
// @ts-nocheck
import { NextApiResponse } from 'next';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import type { ChatDispatchProps } from '@fastgpt/global/core/workflow/type/index.d';
import type { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type/index.d';
import type {
AIChatItemValueItemType,
ChatHistoryItemResType,
ToolRunResponseItemType
} from '@fastgpt/global/core/chat/type.d';
import {
FlowNodeInputTypeEnum,
FlowNodeTypeEnum
} from '@fastgpt/global/core/workflow/node/constant';
import { FlowNodeItemType } from '@fastgpt/global/core/workflow/type';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { responseWriteNodeStatus } from '../../../common/response';
import { getSystemTime } from '@fastgpt/global/common/time/timezone';
import { dispatchHistory } from './init/history';
import { dispatchChatInput } from './init/userChatInput';
import { dispatchChatCompletion } from './chat/oneapi';
import { dispatchDatasetSearch } from './dataset/search';
import { dispatchDatasetConcat } from './dataset/concat';
import { dispatchAnswer } from './tools/answer';
import { dispatchClassifyQuestion } from './agent/classifyQuestion';
import { dispatchContentExtract } from './agent/extract';
import { dispatchHttpRequest } from './tools/http';
import { dispatchHttp468Request } from './tools/http468';
import { dispatchAppRequest } from './tools/runApp';
import { dispatchQueryExtension } from './tools/queryExternsion';
import { dispatchRunPlugin } from './plugin/run';
import { dispatchPluginInput } from './plugin/runInput';
import { dispatchPluginOutput } from './plugin/runOutput';
import { checkTheModuleConnectedByTool, valueTypeFormat } from './utils';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { dispatchRunTools } from './agent/runTool/index';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import { DispatchFlowResponse } from './type';
import { dispatchStopToolCall } from './agent/runTool/stopTool';
import { dispatchLafRequest } from './tools/runLaf';
const callbackMap: Record<string, Function> = {
questionInput: dispatchChatInput,
[FlowNodeTypeEnum.answerNode]: dispatchAnswer,
[FlowNodeTypeEnum.chatNode]: dispatchChatCompletion,
[FlowNodeTypeEnum.datasetSearchNode]: dispatchDatasetSearch,
[FlowNodeTypeEnum.datasetConcatNode]: dispatchDatasetConcat,
[FlowNodeTypeEnum.classifyQuestion]: dispatchClassifyQuestion,
[FlowNodeTypeEnum.contentExtract]: dispatchContentExtract,
[FlowNodeTypeEnum.httpRequest468]: dispatchHttp468Request,
[FlowNodeTypeEnum.runApp]: dispatchAppRequest,
[FlowNodeTypeEnum.pluginModule]: dispatchRunPlugin,
[FlowNodeTypeEnum.pluginInput]: dispatchPluginInput,
[FlowNodeTypeEnum.pluginOutput]: dispatchPluginOutput,
[FlowNodeTypeEnum.queryExtension]: dispatchQueryExtension,
[FlowNodeTypeEnum.tools]: dispatchRunTools,
[FlowNodeTypeEnum.stopTool]: dispatchStopToolCall,
[FlowNodeTypeEnum.lafModule]: dispatchLafRequest
};
/* running */
export async function dispatchWorkFlowV1({
res,
modules = [],
runtimeModules,
startParams = {},
histories = [],
variables = {},
user,
stream = false,
detail = false,
...props
}: ChatDispatchProps & {
modules?: FlowNodeItemType[]; // app modules
runtimeModules?: RuntimeNodeItemType[];
startParams?: Record<string, any>; // entry module params
}): Promise<DispatchFlowResponse> {
// set sse response headers
if (res && stream) {
res.setHeader('Content-Type', 'text/event-stream;charset=utf-8');
res.setHeader('Access-Control-Allow-Origin', '*');
res.setHeader('X-Accel-Buffering', 'no');
res.setHeader('Cache-Control', 'no-cache, no-transform');
}
variables = {
...getSystemVariable({ timezone: user.timezone }),
...variables
};
const runningModules = runtimeModules ? runtimeModules : loadModules(modules, variables);
let chatResponses: ChatHistoryItemResType[] = []; // response request and save to database
let chatAssistantResponse: AIChatItemValueItemType[] = []; // The value will be returned to the user
let chatNodeUsages: ChatNodeUsageType[] = [];
let toolRunResponse: ToolRunResponseItemType;
let runningTime = Date.now();
/* Store special response field */
function pushStore(
{ inputs = [] }: RuntimeNodeItemType,
{
answerText = '',
responseData,
nodeDispatchUsages,
toolResponses,
assistantResponses
}: {
[NodeOutputKeyEnum.answerText]?: string;
[DispatchNodeResponseKeyEnum.nodeResponse]?: ChatHistoryItemResType;
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]?: ChatNodeUsageType[];
[DispatchNodeResponseKeyEnum.toolResponses]?: ToolRunResponseItemType;
[DispatchNodeResponseKeyEnum.assistantResponses]?: AIChatItemValueItemType[]; // tool module, save the response value
}
) {
const time = Date.now();
if (responseData) {
chatResponses.push({
...responseData,
runningTime: +((time - runningTime) / 1000).toFixed(2)
});
}
if (nodeDispatchUsages) {
chatNodeUsages = chatNodeUsages.concat(nodeDispatchUsages);
props.maxRunTimes -= nodeDispatchUsages.length;
}
if (toolResponses !== undefined) {
if (Array.isArray(toolResponses) && toolResponses.length === 0) return;
if (typeof toolResponses === 'object' && Object.keys(toolResponses).length === 0) {
return;
}
toolRunResponse = toolResponses;
}
if (assistantResponses) {
chatAssistantResponse = chatAssistantResponse.concat(assistantResponses);
}
// save assistant text response
if (answerText) {
const isResponseAnswerText =
inputs.find((item) => item.key === NodeInputKeyEnum.aiChatIsResponseText)?.value ?? true;
if (isResponseAnswerText) {
chatAssistantResponse.push({
type: ChatItemValueTypeEnum.text,
text: {
content: answerText
}
});
}
}
runningTime = time;
}
/* Inject data into module input */
function moduleInput(module: RuntimeNodeItemType, data: Record<string, any> = {}) {
const updateInputValue = (key: string, value: any) => {
const index = module.inputs.findIndex((item: any) => item.key === key);
if (index === -1) return;
module.inputs[index].value = value;
};
Object.entries(data).map(([key, val]: any) => {
updateInputValue(key, val);
});
return;
}
/* Pass the output of the module to the next stage */
function moduleOutput(
module: RuntimeNodeItemType,
result: Record<string, any> = {}
): Promise<any> {
pushStore(module, result);
const nextRunModules: RuntimeNodeItemType[] = [];
// Assign the output value to the next module
module.outputs.map((outputItem) => {
if (result[outputItem.key] === undefined) return;
/* update output value */
outputItem.value = result[outputItem.key];
/* update target */
outputItem.targets.map((target: any) => {
// find module
const targetModule = runningModules.find((item) => item.moduleId === target.moduleId);
if (!targetModule) return;
// push to running queue
nextRunModules.push(targetModule);
// update input
moduleInput(targetModule, { [target.key]: outputItem.value });
});
});
// Ensure the uniqueness of running modules
const set = new Set<string>();
const filterModules = nextRunModules.filter((module) => {
if (set.has(module.moduleId)) return false;
set.add(module.moduleId);
return true;
});
return checkModulesCanRun(filterModules);
}
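/*
  Illustrative note (not part of the diff): moduleOutput is the only place data moves
  between modules. For example, an output { key: 'answerText', targets: [{ moduleId: 'B', key: 'text' }] }
  copies its value into input 'text' of module B and queues B; checkModulesCanRun then
  runs B once none of its inputs are still undefined. The module ids here are hypothetical.
*/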
function checkModulesCanRun(modules: RuntimeNodeItemType[] = []) {
return Promise.all(
modules.map((module) => {
if (!module.inputs.find((item: any) => item.value === undefined)) {
// remove switch
moduleInput(module, { [NodeInputKeyEnum.switch]: undefined });
return moduleRun(module);
}
})
);
}
async function moduleRun(module: RuntimeNodeItemType): Promise<any> {
if (res?.closed || props.maxRunTimes <= 0) return Promise.resolve();
if (res && stream && detail && module.showStatus) {
responseStatus({
res,
name: module.name,
status: 'running'
});
}
// get module running params
const params: Record<string, any> = {};
module.inputs.forEach((item) => {
params[item.key] = valueTypeFormat(item.value, item.valueType);
});
const dispatchData: ModuleDispatchProps<Record<string, any>> = {
...props,
res,
variables,
histories,
user,
stream,
detail,
module,
runtimeModules: runningModules,
params
};
// run module
const dispatchRes: Record<string, any> = await (async () => {
if (callbackMap[module.flowType]) {
return callbackMap[module.flowType](dispatchData);
}
return {};
})();
// format response data. Add module name and module type
const formatResponseData: ChatHistoryItemResType = (() => {
if (!dispatchRes[DispatchNodeResponseKeyEnum.nodeResponse]) return undefined;
return {
moduleName: module.name,
moduleType: module.flowType,
...dispatchRes[DispatchNodeResponseKeyEnum.nodeResponse]
};
})();
// Add output default value
module.outputs.forEach((item) => {
if (!item.required) return;
if (dispatchRes[item.key] !== undefined) return;
dispatchRes[item.key] = valueTypeFormat(item.defaultValue, item.valueType);
});
// Pass userChatInput
const hasUserChatInputTarget = !!module.outputs.find(
(item) => item.key === NodeOutputKeyEnum.userChatInput
)?.targets?.length;
return moduleOutput(module, {
[NodeOutputKeyEnum.finish]: true,
[NodeOutputKeyEnum.userChatInput]: hasUserChatInputTarget
? params[NodeOutputKeyEnum.userChatInput]
: undefined,
...dispatchRes,
[DispatchNodeResponseKeyEnum.nodeResponse]: formatResponseData,
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]:
dispatchRes[DispatchNodeResponseKeyEnum.nodeDispatchUsages]
});
}
// start process with initInput
const initModules = runningModules.filter((item) => item.isEntry);
// reset entry
modules.forEach((item) => {
item.isEntry = false;
});
initModules.map((module) =>
moduleInput(module, {
...startParams,
history: [] // deprecated field; the history module now reads histories from other fields.
})
);
await checkModulesCanRun(initModules);
// force try to run pluginOutput
const pluginOutputModule = runningModules.find(
(item) => item.flowType === FlowNodeTypeEnum.pluginOutput
);
if (pluginOutputModule) {
await moduleRun(pluginOutputModule);
}
return {
flowResponses: chatResponses,
flowUsages: chatNodeUsages,
[DispatchNodeResponseKeyEnum.assistantResponses]:
concatAssistantResponseAnswerText(chatAssistantResponse),
[DispatchNodeResponseKeyEnum.toolResponses]: toolRunResponse
};
}
/* init store modules to running modules */
function loadModules(
modules: FlowNodeItemType[],
variables: Record<string, any>
): RuntimeNodeItemType[] {
return modules
.filter((item) => {
return ![FlowNodeTypeEnum.userGuide].includes(item.moduleId as any);
})
.map<RuntimeNodeItemType>((module) => {
return {
moduleId: module.moduleId,
name: module.name,
avatar: module.avatar,
intro: module.intro,
flowType: module.flowType,
showStatus: module.showStatus,
isEntry: module.isEntry,
inputs: module.inputs
.filter(
/*
Keep an input when any of the following holds:
1. it is a system input (must be kept)
2. it is connected by a source handle
3. it has a manual value or a default value
4. the module is connected by a tool: keep the toolDescription input
*/
(item) => {
const isTool = checkTheModuleConnectedByTool(modules, module);
if (isTool && item.toolDescription) {
return true;
}
return item.type === 'systemInput' || item.connected || item.value !== undefined;
}
) // filter unconnected target input
.map((item) => {
const replace = ['string'].includes(typeof item.value);
return {
key: item.key,
// variables replace
value: replace ? replaceVariable(item.value, variables) : item.value,
valueType: item.valueType,
required: item.required,
toolDescription: item.toolDescription
};
}),
outputs: module.outputs
.map((item) => ({
key: item.key,
required: item.required,
defaultValue: item.defaultValue,
answer: item.key === NodeOutputKeyEnum.answerText,
value: undefined,
valueType: item.valueType,
targets: item.targets
}))
.sort((a, b) => {
// finish output always comes last
if (a.key === NodeOutputKeyEnum.finish) return 1;
if (b.key === NodeOutputKeyEnum.finish) return -1;
return 0;
})
};
});
}
/* sse response module status */
export function responseStatus({
res,
status,
name
}: {
res: NextApiResponse;
status?: 'running' | 'finish';
name?: string;
}) {
if (!name) return;
responseWriteNodeStatus({
res,
name
});
}
/* get system variable */
export function getSystemVariable({ timezone }: { timezone: string }) {
return {
cTime: getSystemTime(timezone)
};
}
export const concatAssistantResponseAnswerText = (response: AIChatItemValueItemType[]) => {
const result: AIChatItemValueItemType[] = [];
// merge consecutive text values
for (let i = 0; i < response.length; i++) {
const item = response[i];
if (item.type === ChatItemValueTypeEnum.text) {
let text = item.text?.content || '';
const lastItem = result[result.length - 1];
if (lastItem && lastItem.type === ChatItemValueTypeEnum.text && lastItem.text?.content) {
lastItem.text.content += text;
continue;
}
}
result.push(item);
}
return result;
};
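/*
  Illustrative sketch (not part of the diff): the merge above folds consecutive text
  values into one and leaves non-text values (e.g. tool responses) as boundaries.
  Hypothetical input/output:
*/
// input:  [text('Hello'), text(' world'), toolCall, text('!')]
// output: [text('Hello world'), toolCall, text('!')]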

View File

@@ -1,10 +1,10 @@
-import { ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
+import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
 import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
-import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
+import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type';
 import { getHistories } from '../utils';
 export type HistoryProps = ModuleDispatchProps<{
   maxContext?: number;
-  [ModuleInputKeyEnum.history]: ChatItemType[];
+  [NodeInputKeyEnum.history]: ChatItemType[];
 }>;
 export const dispatchHistory = (props: Record<string, any>) => {

View File

@@ -1,7 +1,7 @@
-import { ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
-import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
+import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
+import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type';
 export type UserChatInputProps = ModuleDispatchProps<{
-  [ModuleInputKeyEnum.userChatInput]: string;
+  [NodeInputKeyEnum.userChatInput]: string;
 }>;
 export const dispatchChatInput = (props: Record<string, any>) => {

View File

@@ -0,0 +1,99 @@
// @ts-nocheck
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type';
import { dispatchWorkFlowV1 } from '../index';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { getPluginRuntimeById } from '../../../plugin/controller';
import { authPluginCanUse } from '../../../../support/permission/auth/plugin';
import { setEntryEntries, DYNAMIC_INPUT_KEY } from '../utils';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
type RunPluginProps = ModuleDispatchProps<{
[NodeInputKeyEnum.pluginId]: string;
[key: string]: any;
}>;
type RunPluginResponse = DispatchNodeResultType<{}>;
export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPluginResponse> => {
const {
mode,
teamId,
tmbId,
params: { pluginId, ...data }
} = props;
if (!pluginId) {
return Promise.reject('pluginId not found');
}
await authPluginCanUse({ id: pluginId, teamId, tmbId });
const plugin = await getPluginRuntimeById(pluginId);
// concat dynamic inputs
const inputModule = plugin.nodes.find((item) => item.flowType === FlowNodeTypeEnum.pluginInput);
if (!inputModule) return Promise.reject('Plugin error: it has no input module.');
const hasDynamicInput = inputModule.inputs.find((input) => input.key === DYNAMIC_INPUT_KEY);
const startParams: Record<string, any> = (() => {
if (!hasDynamicInput) return data;
const params: Record<string, any> = {
[DYNAMIC_INPUT_KEY]: {}
};
for (const key in data) {
const input = inputModule.inputs.find((input) => input.key === key);
if (input) {
params[key] = data[key];
} else {
params[DYNAMIC_INPUT_KEY][key] = data[key];
}
}
return params;
})();
const { flowResponses, flowUsages, assistantResponses } = await dispatchWorkFlowV1({
...props,
modules: setEntryEntries(plugin.nodes).map((module) => ({
...module,
showStatus: false
})),
runtimeModules: undefined, // must reset
startParams
});
const output = flowResponses.find((item) => item.moduleType === FlowNodeTypeEnum.pluginOutput);
if (output) {
output.moduleLogo = plugin.avatar;
}
return {
assistantResponses,
// responseData, // debug
[DispatchNodeResponseKeyEnum.nodeResponse]: {
moduleLogo: plugin.avatar,
totalPoints: flowResponses.reduce((sum, item) => sum + (item.totalPoints || 0), 0),
pluginOutput: output?.pluginOutput,
pluginDetail:
mode === 'test' && plugin.teamId === teamId
? flowResponses.filter((item) => {
const filterArr = [FlowNodeTypeEnum.pluginOutput];
return !filterArr.includes(item.moduleType as any);
})
: undefined
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: plugin.name,
totalPoints: flowUsages.reduce((sum, item) => sum + (item.totalPoints || 0), 0),
model: plugin.name,
tokens: 0
}
],
[DispatchNodeResponseKeyEnum.toolResponses]: output?.pluginOutput ? output.pluginOutput : {},
...(output ? output.pluginOutput : {})
};
};
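/*
  Illustrative sketch (not part of the diff): how the startParams packing above behaves
  when the plugin's input module declares a dynamic input. Keys matching a declared
  input pass through; everything else is folded under DYNAMIC_INPUT_KEY. The input
  keys and values below are hypothetical.
*/
// declared plugin inputs: ['city', DYNAMIC_INPUT_KEY]
// data        = { city: 'Berlin', units: 'metric' }
// startParams = { city: 'Berlin', [DYNAMIC_INPUT_KEY]: { units: 'metric' } }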

View File

@@ -0,0 +1,11 @@
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type';
export type PluginInputProps = ModuleDispatchProps<{
[key: string]: any;
}>;
export const dispatchPluginInput = (props: PluginInputProps) => {
const { params } = props;
return params;
};

View File

@@ -0,0 +1,19 @@
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type.d';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
export type PluginOutputProps = ModuleDispatchProps<{
[key: string]: any;
}>;
export type PluginOutputResponse = DispatchNodeResultType<{}>;
export const dispatchPluginOutput = (props: PluginOutputProps): PluginOutputResponse => {
const { params } = props;
return {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: 0,
pluginOutput: params
}
};
};

View File

@@ -0,0 +1,37 @@
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { responseWrite } from '../../../../common/response';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type';
import { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
export type AnswerProps = ModuleDispatchProps<{
text: string;
}>;
export type AnswerResponse = DispatchNodeResultType<{
[NodeOutputKeyEnum.answerText]: string;
}>;
export const dispatchAnswer = (props: Record<string, any>): AnswerResponse => {
const {
res,
detail,
stream,
params: { text = '' }
} = props as AnswerProps;
const formatText = typeof text === 'string' ? text : JSON.stringify(text, null, 2);
if (stream) {
responseWrite({
res,
event: detail ? SseResponseEventEnum.fastAnswer : undefined,
data: textAdaptGptResponse({
text: `\n${formatText}`
})
});
}
return {
[NodeOutputKeyEnum.answerText]: formatText
};
};

View File

@@ -1,24 +1,22 @@
-import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
-import {
-  DYNAMIC_INPUT_KEY,
-  ModuleInputKeyEnum,
-  ModuleOutputKeyEnum
-} from '@fastgpt/global/core/module/constants';
-import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
+// @ts-nocheck
+import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type';
+import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
+import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
 import axios from 'axios';
 import { valueTypeFormat } from '../utils';
 import { SERVICE_LOCAL_HOST } from '../../../../common/system/tools';
-import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
+import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
+import { DYNAMIC_INPUT_KEY } from '../utils';
 type HttpRequestProps = ModuleDispatchProps<{
-  [ModuleInputKeyEnum.abandon_httpUrl]: string;
-  [ModuleInputKeyEnum.httpMethod]: string;
-  [ModuleInputKeyEnum.httpReqUrl]: string;
-  [ModuleInputKeyEnum.httpHeaders]: string;
+  [NodeInputKeyEnum.abandon_httpUrl]: string;
+  [NodeInputKeyEnum.httpMethod]: string;
+  [NodeInputKeyEnum.httpReqUrl]: string;
+  [NodeInputKeyEnum.httpHeaders]: string;
   [key: string]: any;
 }>;
 type HttpResponse = DispatchNodeResultType<{
-  [ModuleOutputKeyEnum.failed]?: boolean;
+  [NodeOutputKeyEnum.failed]?: boolean;
   [key: string]: any;
 }>;
@@ -108,7 +106,7 @@ export const dispatchHttpRequest = async (props: HttpRequestProps): Promise<Http
   console.log(error);
   return {
-    [ModuleOutputKeyEnum.failed]: true,
+    [NodeOutputKeyEnum.failed]: true,
     [DispatchNodeResponseKeyEnum.nodeResponse]: {
       totalPoints: 0,
       body: formatBody,

View File

@@ -0,0 +1,294 @@
// @ts-nocheck
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import axios from 'axios';
import { DYNAMIC_INPUT_KEY, valueTypeFormat } from '../utils';
import { SERVICE_LOCAL_HOST } from '../../../../common/system/tools';
import { addLog } from '../../../../common/system/log';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { getErrText } from '@fastgpt/global/common/error/utils';
type PropsArrType = {
key: string;
type: string;
value: string;
};
type HttpRequestProps = ModuleDispatchProps<{
[NodeInputKeyEnum.abandon_httpUrl]: string;
[NodeInputKeyEnum.httpMethod]: string;
[NodeInputKeyEnum.httpReqUrl]: string;
[NodeInputKeyEnum.httpHeaders]: PropsArrType[];
[NodeInputKeyEnum.httpParams]: PropsArrType[];
[NodeInputKeyEnum.httpJsonBody]: string;
[DYNAMIC_INPUT_KEY]: Record<string, any>;
[key: string]: any;
}>;
type HttpResponse = DispatchNodeResultType<{
[NodeOutputKeyEnum.failed]?: boolean;
[key: string]: any;
}>;
const UNDEFINED_SIGN = 'UNDEFINED_SIGN';
export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<HttpResponse> => {
let {
appId,
chatId,
responseChatItemId,
variables,
module: { outputs },
histories,
params: {
system_httpMethod: httpMethod = 'POST',
system_httpReqUrl: httpReqUrl,
system_httpHeader: httpHeader,
system_httpParams: httpParams = [],
system_httpJsonBody: httpJsonBody,
[DYNAMIC_INPUT_KEY]: dynamicInput,
...body
}
} = props;
if (!httpReqUrl) {
return Promise.reject('Http url is empty');
}
const concatVariables = {
appId,
chatId,
responseChatItemId,
...variables,
histories: histories.slice(-10),
...body
};
httpReqUrl = replaceVariable(httpReqUrl, concatVariables);
// parse header
const headers = await (() => {
try {
if (!httpHeader || httpHeader.length === 0) return {};
// array
return httpHeader.reduce((acc: Record<string, string>, item) => {
const key = replaceVariable(item.key, concatVariables);
const value = replaceVariable(item.value, concatVariables);
acc[key] = valueTypeFormat(value, 'string');
return acc;
}, {});
} catch (error) {
return Promise.reject('Header is in invalid JSON format');
}
})();
const params = httpParams.reduce((acc: Record<string, string>, item) => {
const key = replaceVariable(item.key, concatVariables);
const value = replaceVariable(item.value, concatVariables);
acc[key] = valueTypeFormat(value, 'string');
return acc;
}, {});
const requestBody = await (() => {
if (!httpJsonBody) return { [DYNAMIC_INPUT_KEY]: dynamicInput };
httpJsonBody = replaceVariable(httpJsonBody, concatVariables);
try {
const jsonParse = JSON.parse(httpJsonBody);
const removeSignJson = removeUndefinedSign(jsonParse);
return { [DYNAMIC_INPUT_KEY]: dynamicInput, ...removeSignJson };
} catch (error) {
console.log(error);
return Promise.reject(`Invalid JSON body: ${httpJsonBody}`);
}
})();
try {
const { formatResponse, rawResponse } = await fetchData({
method: httpMethod,
url: httpReqUrl,
headers,
body: requestBody,
params
});
// format output value type
const results: Record<string, any> = {};
for (const key in formatResponse) {
const output = outputs.find((item) => item.key === key);
if (!output) continue;
results[key] = valueTypeFormat(formatResponse[key], output.valueType);
}
return {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: 0,
params: Object.keys(params).length > 0 ? params : undefined,
body: Object.keys(requestBody).length > 0 ? requestBody : undefined,
headers: Object.keys(headers).length > 0 ? headers : undefined,
httpResult: rawResponse
},
[DispatchNodeResponseKeyEnum.toolResponses]: results,
[NodeOutputKeyEnum.httpRawResponse]: rawResponse,
...results
};
} catch (error) {
addLog.error('Http request error', error);
return {
[NodeOutputKeyEnum.failed]: true,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: 0,
params: Object.keys(params).length > 0 ? params : undefined,
body: Object.keys(requestBody).length > 0 ? requestBody : undefined,
headers: Object.keys(headers).length > 0 ? headers : undefined,
httpResult: { error: formatHttpError(error) }
},
[NodeOutputKeyEnum.httpRawResponse]: getErrText(error)
};
}
};
async function fetchData({
method,
url,
headers,
body,
params
}: {
method: string;
url: string;
headers: Record<string, any>;
body: Record<string, any>;
params: Record<string, any>;
}): Promise<Record<string, any>> {
const { data: response } = await axios({
method,
baseURL: `http://${SERVICE_LOCAL_HOST}`,
url,
headers: {
'Content-Type': 'application/json',
...headers
},
timeout: 120000,
params: params,
data: ['POST', 'PUT', 'PATCH'].includes(method) ? body : undefined
});
/*
parse the json:
{
user: {
name: 'xxx',
age: 12
},
list: [
{
name: 'xxx',
age: 50
},
[{ test: 22 }]
],
psw: 'xxx'
}
result: {
'user': { name: 'xxx', age: 12 },
'user.name': 'xxx',
'user.age': 12,
'list': [ { name: 'xxx', age: 50 }, [ [Object] ] ],
'list[0]': { name: 'xxx', age: 50 },
'list[0].name': 'xxx',
'list[0].age': 50,
'list[1]': [ { test: 22 } ],
'list[1][0]': { test: 22 },
'list[1][0].test': 22,
'psw': 'xxx'
}
*/
const parseJson = (obj: Record<string, any>, prefix = '') => {
let result: Record<string, any> = {};
if (Array.isArray(obj)) {
for (let i = 0; i < obj.length; i++) {
result[`${prefix}[${i}]`] = obj[i];
if (Array.isArray(obj[i])) {
result = {
...result,
...parseJson(obj[i], `${prefix}[${i}]`)
};
} else if (typeof obj[i] === 'object') {
result = {
...result,
...parseJson(obj[i], `${prefix}[${i}].`)
};
}
}
} else if (typeof obj == 'object') {
for (const key in obj) {
result[`${prefix}${key}`] = obj[key];
if (Array.isArray(obj[key])) {
result = {
...result,
...parseJson(obj[key], `${prefix}${key}`)
};
} else if (typeof obj[key] === 'object') {
result = {
...result,
...parseJson(obj[key], `${prefix}${key}.`)
};
}
}
}
return result;
};
return {
formatResponse:
typeof response === 'object' && !Array.isArray(response) ? parseJson(response) : {},
rawResponse: response
};
}
function replaceVariable(text: string, obj: Record<string, any>) {
for (const [key, value] of Object.entries(obj)) {
if (value === undefined) {
text = text.replace(new RegExp(`{{${key}}}`, 'g'), UNDEFINED_SIGN);
} else {
const replacement = JSON.stringify(value);
const unquotedReplacement =
replacement.startsWith('"') && replacement.endsWith('"')
? replacement.slice(1, -1)
: replacement;
text = text.replace(new RegExp(`{{${key}}}`, 'g'), unquotedReplacement);
}
}
return text || '';
}
function removeUndefinedSign(obj: Record<string, any>) {
for (const key in obj) {
if (obj[key] === UNDEFINED_SIGN) {
obj[key] = undefined;
} else if (Array.isArray(obj[key])) {
obj[key] = obj[key].map((item: any) => {
if (item === UNDEFINED_SIGN) {
return undefined;
} else if (typeof item === 'object') {
removeUndefinedSign(item);
}
return item;
});
} else if (typeof obj[key] === 'object') {
removeUndefinedSign(obj[key]);
}
}
return obj;
}
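/*
  Illustrative sketch (not part of the diff): replaceVariable and removeUndefinedSign
  work as a pair. Substitution happens inside a JSON template string, so an undefined
  variable is first written as the UNDEFINED_SIGN placeholder (keeping the template
  parseable when the placeholder sits in a quoted slot) and is converted back to a real
  undefined after JSON.parse. The template and variables below are hypothetical.
*/
// template: '{"name":"{{user}}","age":"{{age}}"}'  with  { user: 'li', age: undefined }
// step 1:   '{"name":"li","age":"UNDEFINED_SIGN"}'      (replaceVariable)
// step 2:   { name: 'li', age: 'UNDEFINED_SIGN' }       (JSON.parse)
// step 3:   { name: 'li', age: undefined }              (removeUndefinedSign)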
function formatHttpError(error: any) {
return {
message: error?.message,
name: error?.name,
method: error?.config?.method,
baseURL: error?.config?.baseURL,
url: error?.config?.url,
code: error?.code,
status: error?.status
};
}

View File

@@ -0,0 +1,77 @@
// @ts-nocheck
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { ModelTypeEnum, getLLMModel } from '../../../../core/ai/model';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import { queryExtension } from '../../../../core/ai/functions/queryExtension';
import { getHistories } from '../utils';
import { hashStr } from '@fastgpt/global/common/string/tools';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.aiModel]: string;
[NodeInputKeyEnum.aiSystemPrompt]?: string;
[NodeInputKeyEnum.history]?: ChatItemType[] | number;
[NodeInputKeyEnum.userChatInput]: string;
}>;
type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.text]: string;
}>;
export const dispatchQueryExtension = async ({
histories,
module,
params: { model, systemPrompt, history, userChatInput }
}: Props): Promise<Response> => {
if (!userChatInput) {
return Promise.reject('Question is empty');
}
const queryExtensionModel = getLLMModel(model);
const chatHistories = getHistories(history, histories);
const { extensionQueries, tokens } = await queryExtension({
chatBg: systemPrompt,
query: userChatInput,
histories: chatHistories,
model: queryExtensionModel.model
});
extensionQueries.unshift(userChatInput);
const { totalPoints, modelName } = formatModelChars2Points({
model: queryExtensionModel.model,
tokens,
modelType: ModelTypeEnum.llm
});
const set = new Set<string>();
const filterSameQueries = extensionQueries.filter((item) => {
// strip all punctuation, whitespace, etc. and compare only the remaining text
const str = hashStr(item.replace(/[^\p{L}\p{N}]/gu, ''));
if (set.has(str)) return false;
set.add(str);
return true;
});
return {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints,
model: modelName,
tokens,
query: userChatInput,
textOutput: JSON.stringify(filterSameQueries)
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: module.name,
totalPoints,
model: modelName,
tokens
}
],
[NodeOutputKeyEnum.text]: JSON.stringify(filterSameQueries)
};
};
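/*
  Illustrative sketch (not part of the diff): the dedupe above hashes each query after
  stripping every character that is not a Unicode letter or digit, so queries differing
  only in punctuation or whitespace collapse into one. Example queries are hypothetical;
  note that letter case is preserved, so case variants do NOT collapse.
*/
// 'How to deploy FastGPT?'   -> hash('HowtodeployFastGPT')
// 'How to deploy FastGPT!!'  -> hash('HowtodeployFastGPT')   // filtered as duplicate
// 'how to deploy fastgpt'    -> hash('howtodeployfastgpt')   // kept: different case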

View File

@@ -0,0 +1,108 @@
// @ts-nocheck
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type';
import { SelectAppItemType } from '@fastgpt/global/core/workflow/type';
import { dispatchWorkFlowV1 } from '../index';
import { MongoApp } from '../../../../core/app/schema';
import { responseWrite } from '../../../../common/response';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { getHistories, setEntryEntries } from '../utils';
import { chatValue2RuntimePrompt, runtimePrompt2ChatsValue } from '@fastgpt/global/core/chat/adapt';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.history]?: ChatItemType[] | number;
app: SelectAppItemType;
}>;
type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.answerText]: string;
[NodeOutputKeyEnum.history]: ChatItemType[];
}>;
export const dispatchAppRequest = async (props: Props): Promise<Response> => {
const {
res,
teamId,
stream,
detail,
histories,
inputFiles,
params: { userChatInput, history, app }
} = props;
let start = Date.now();
if (!userChatInput) {
return Promise.reject('Input is empty');
}
const appData = await MongoApp.findOne({
_id: app.id,
teamId
});
if (!appData) {
return Promise.reject('App not found');
}
if (stream) {
responseWrite({
res,
event: detail ? SseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({
text: '\n'
})
});
}
const chatHistories = getHistories(history, histories);
const { flowResponses, flowUsages, assistantResponses } = await dispatchWorkFlowV1({
...props,
appId: app.id,
modules: setEntryEntries(appData.modules),
runtimeModules: undefined, // must reset
histories: chatHistories,
inputFiles,
startParams: {
userChatInput
}
});
const completeMessages = chatHistories.concat([
{
obj: ChatRoleEnum.Human,
value: runtimePrompt2ChatsValue({
files: inputFiles,
text: userChatInput
})
},
{
obj: ChatRoleEnum.AI,
value: assistantResponses
}
]);
const { text } = chatValue2RuntimePrompt(assistantResponses);
return {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
moduleLogo: appData.avatar,
query: userChatInput,
textOutput: text,
totalPoints: flowResponses.reduce((sum, item) => sum + (item.totalPoints || 0), 0)
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: appData.name,
totalPoints: flowUsages.reduce((sum, item) => sum + (item.totalPoints || 0), 0)
}
],
answerText: text,
history: completeMessages
};
};

View File

@@ -0,0 +1,209 @@
// @ts-nocheck
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import axios from 'axios';
import { DYNAMIC_INPUT_KEY, valueTypeFormat } from '../utils';
import { SERVICE_LOCAL_HOST } from '../../../../common/system/tools';
import { addLog } from '../../../../common/system/log';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
type LafRequestProps = ModuleDispatchProps<{
[NodeInputKeyEnum.httpReqUrl]: string;
[DYNAMIC_INPUT_KEY]: Record<string, any>;
[key: string]: any;
}>;
type LafResponse = DispatchNodeResultType<{
[NodeOutputKeyEnum.failed]?: boolean;
[key: string]: any;
}>;
const UNDEFINED_SIGN = 'UNDEFINED_SIGN';
export const dispatchLafRequest = async (props: LafRequestProps): Promise<LafResponse> => {
let {
appId,
chatId,
responseChatItemId,
variables,
module: { outputs },
histories,
params: { system_httpReqUrl: httpReqUrl, [DYNAMIC_INPUT_KEY]: dynamicInput, ...body }
} = props;
if (!httpReqUrl) {
return Promise.reject('Http url is empty');
}
const concatVariables = {
appId,
chatId,
responseChatItemId,
...variables,
...body
};
httpReqUrl = replaceVariable(httpReqUrl, concatVariables);
const requestBody = {
systemParams: {
appId,
chatId,
responseChatItemId,
histories: histories.slice(0, 10)
},
variables,
...dynamicInput,
...body
};
try {
const { formatResponse, rawResponse } = await fetchData({
method: 'POST',
url: httpReqUrl,
body: requestBody
});
// format output value type
const results: Record<string, any> = {};
for (const key in formatResponse) {
const output = outputs.find((item) => item.key === key);
if (!output) continue;
results[key] = valueTypeFormat(formatResponse[key], output.valueType);
}
return {
assistantResponses: [],
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: 0,
body: Object.keys(requestBody).length > 0 ? requestBody : undefined,
httpResult: rawResponse
},
[DispatchNodeResponseKeyEnum.toolResponses]: rawResponse,
[NodeOutputKeyEnum.httpRawResponse]: rawResponse,
...results
};
} catch (error) {
addLog.error('Http request error', error);
return {
[NodeOutputKeyEnum.failed]: true,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: 0,
body: Object.keys(requestBody).length > 0 ? requestBody : undefined,
httpResult: { error: formatHttpError(error) }
}
};
}
};
async function fetchData({
method,
url,
body
}: {
method: string;
url: string;
body: Record<string, any>;
}): Promise<Record<string, any>> {
const { data: response } = await axios({
method,
baseURL: `http://${SERVICE_LOCAL_HOST}`,
url,
headers: {
'Content-Type': 'application/json'
},
data: body
});
const parseJson = (obj: Record<string, any>, prefix = '') => {
let result: Record<string, any> = {};
if (Array.isArray(obj)) {
for (let i = 0; i < obj.length; i++) {
result[`${prefix}[${i}]`] = obj[i];
if (Array.isArray(obj[i])) {
result = {
...result,
...parseJson(obj[i], `${prefix}[${i}]`)
};
} else if (typeof obj[i] === 'object') {
result = {
...result,
...parseJson(obj[i], `${prefix}[${i}].`)
};
}
}
} else if (typeof obj == 'object') {
for (const key in obj) {
result[`${prefix}${key}`] = obj[key];
if (Array.isArray(obj[key])) {
result = {
...result,
...parseJson(obj[key], `${prefix}${key}`)
};
} else if (typeof obj[key] === 'object') {
result = {
...result,
...parseJson(obj[key], `${prefix}${key}.`)
};
}
}
}
return result;
};
return {
formatResponse:
typeof response === 'object' && !Array.isArray(response) ? parseJson(response) : {},
rawResponse: response
};
}
function replaceVariable(text: string, obj: Record<string, any>) {
for (const [key, value] of Object.entries(obj)) {
if (value === undefined) {
text = text.replace(new RegExp(`{{${key}}}`, 'g'), UNDEFINED_SIGN);
} else {
const replacement = JSON.stringify(value);
const unquotedReplacement =
replacement.startsWith('"') && replacement.endsWith('"')
? replacement.slice(1, -1)
: replacement;
text = text.replace(new RegExp(`{{${key}}}`, 'g'), unquotedReplacement);
}
}
return text || '';
}
function removeUndefinedSign(obj: Record<string, any>) {
for (const key in obj) {
if (obj[key] === UNDEFINED_SIGN) {
obj[key] = undefined;
} else if (Array.isArray(obj[key])) {
obj[key] = obj[key].map((item: any) => {
if (item === UNDEFINED_SIGN) {
return undefined;
} else if (typeof item === 'object') {
removeUndefinedSign(item);
}
return item;
});
} else if (typeof obj[key] === 'object') {
removeUndefinedSign(obj[key]);
}
}
return obj;
}
function formatHttpError(error: any) {
return {
message: error?.message,
name: error?.name,
method: error?.config?.method,
baseURL: error?.config?.baseURL,
url: error?.config?.url,
code: error?.code,
status: error?.status
};
}
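/*
  Illustrative sketch (not part of the diff): the JSON body POSTed to a Laf cloud
  function by dispatchLafRequest, assembled from the code above. Field values are
  hypothetical placeholders.
*/
// {
//   "systemParams": {
//     "appId": "...", "chatId": "...", "responseChatItemId": "...",
//     "histories": [ /* first 10 history items */ ]
//   },
//   "variables": { "cTime": "..." },
//   /* ...dynamicInput fields, then the remaining module inputs */
// }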

View File

@@ -0,0 +1,15 @@
import {
AIChatItemValueItemType,
ChatHistoryItemResType,
ChatItemValueItemType,
ToolRunResponseItemType
} from '@fastgpt/global/core/chat/type';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
export type DispatchFlowResponse = {
flowResponses: ChatHistoryItemResType[];
flowUsages: ChatNodeUsageType[];
[DispatchNodeResponseKeyEnum.toolResponses]: ToolRunResponseItemType;
[DispatchNodeResponseKeyEnum.assistantResponses]: AIChatItemValueItemType[];
};

View File

@@ -0,0 +1,66 @@
// @ts-nocheck
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import {
WorkflowIOValueTypeEnum,
NodeOutputKeyEnum
} from '@fastgpt/global/core/workflow/constants';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { FlowNodeItemType, StoreNodeItemType } from '@fastgpt/global/core/workflow/type/index';
export const DYNAMIC_INPUT_KEY = 'DYNAMIC_INPUT_KEY';
export const setEntryEntries = (modules: StoreNodeItemType[]) => {
const initRunningModuleType: Record<string, boolean> = {
questionInput: true,
[FlowNodeTypeEnum.pluginInput]: true
};
modules.forEach((item) => {
if (initRunningModuleType[item.flowType]) {
item.isEntry = true;
}
});
return modules;
};
export const checkTheModuleConnectedByTool = (
modules: FlowNodeItemType[],
module: FlowNodeItemType
) => {
let sign = false;
const toolModules = modules.filter((item) => item.flowType === FlowNodeTypeEnum.tools);
toolModules.forEach((item) => {
const toolOutput = item.outputs.find(
(output) => output.key === NodeOutputKeyEnum.selectedTools
);
toolOutput?.targets.forEach((target) => {
if (target.moduleId === module.moduleId) {
sign = true;
}
});
});
return sign;
};
export const getHistories = (history?: ChatItemType[] | number, histories: ChatItemType[] = []) => {
if (!history) return [];
if (typeof history === 'number') return histories.slice(-history);
if (Array.isArray(history)) return history;
return [];
};
/* value type format */
export const valueTypeFormat = (value: any, type?: `${WorkflowIOValueTypeEnum}`) => {
if (value === undefined) return;
if (type === 'string') {
if (typeof value !== 'object') return String(value);
return JSON.stringify(value);
}
if (type === 'number') return Number(value);
if (type === 'boolean') return Boolean(value);
return value;
};
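/*
  Illustrative usage sketch (not part of the diff) for the two helpers above.
  getHistories: a number selects the last N history items; an array passes through.
  valueTypeFormat: coerces a raw value to the declared workflow IO type.
*/
// getHistories(2, [h1, h2, h3])         => [h2, h3]
// getHistories([h1], [h2, h3])          => [h1]
// getHistories(undefined, [h1])         => []
// valueTypeFormat({ a: 1 }, 'string')   => '{"a":1}'
// valueTypeFormat('3', 'number')        => 3
// valueTypeFormat(undefined, 'string')  => undefined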

View File

@@ -0,0 +1,25 @@
import { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import { countPromptTokens } from '../../common/string/tiktoken/index';
/* filter search result */
export const filterSearchResultsByMaxChars = async (
list: SearchDataResponseItemType[],
maxTokens: number
) => {
const results: SearchDataResponseItemType[] = [];
let totalTokens = 0;
for (let i = 0; i < list.length; i++) {
const item = list[i];
totalTokens += await countPromptTokens(item.q + item.a);
if (totalTokens > maxTokens + 500) {
break;
}
results.push(item);
if (totalTokens > maxTokens) {
break;
}
}
return results.length === 0 ? list.slice(0, 1) : results;
};
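/*
  Illustrative sketch (not part of the diff): the filter above keeps appending quotes
  until the running token count passes maxTokens, tolerating one item of overshoot
  (the hard pre-push cutoff is maxTokens + 500). If even the first quote is over
  budget, list.slice(0, 1) still returns it so search never yields nothing.
  Hypothetical numbers with maxTokens = 1500:
*/
// item tokens: [800, 600, 400, 900]
//   800               -> pushed (total 800)
//   800 + 600 = 1400  -> pushed
//   1400 + 400 = 1800 -> pushed, then break (1800 > 1500)
// => 3 quotes returned; the 900-token item is never reached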