V4.7-alpha (#985)

Author: Archer
Date: 2024-03-13 10:50:02 +08:00 (committed by GitHub)
Co-authored-by: heheer <71265218+newfish-cmyk@users.noreply.github.com>
parent 5bca15f12f
commit 9501c3f3a1
170 changed files with 5786 additions and 2342 deletions

View File

@@ -1,7 +0,0 @@
import { FlowNodeTypeEnum } from '@fastgpt/global/core/module/node/constant';
export const initRunningModuleType: Record<string, boolean> = {
[FlowNodeTypeEnum.historyNode]: true,
[FlowNodeTypeEnum.questionInput]: true,
[FlowNodeTypeEnum.pluginInput]: true
};

View File

@@ -2,7 +2,7 @@ import { MongoDatasetTraining } from '@fastgpt/service/core/dataset/training/sch
import { pushQAUsage } from '@/service/support/wallet/usage/push';
import { TrainingModeEnum } from '@fastgpt/global/core/dataset/constants';
import { getAIApi } from '@fastgpt/service/core/ai/config';
import type { ChatMessageItemType } from '@fastgpt/global/core/ai/type.d';
import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type.d';
import { addLog } from '@fastgpt/service/common/system/log';
import { splitText2Chunks } from '@fastgpt/global/common/string/textSplitter';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
@@ -101,7 +101,7 @@ export async function generateQA(): Promise<any> {
${replaceVariable(Prompt_AgentQA.fixedText, { text })}`;
// request LLM to get QA
const messages: ChatMessageItemType[] = [
const messages: ChatCompletionMessageParam[] = [
{
role: 'user',
content: prompt

View File

@@ -1,14 +1,15 @@
import { adaptChat2GptMessages } from '@fastgpt/global/core/chat/adapt';
import { ChatContextFilter } from '@fastgpt/service/core/chat/utils';
import { countMessagesTokens } from '@fastgpt/global/common/string/tiktoken';
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { filterGPTMessageByMaxTokens } from '@fastgpt/service/core/chat/utils';
import {
countGptMessagesTokens,
countMessagesTokens
} from '@fastgpt/global/common/string/tiktoken';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { getAIApi } from '@fastgpt/service/core/ai/config';
import type {
ClassifyQuestionAgentItemType,
ModuleDispatchResponse
} from '@fastgpt/global/core/module/type.d';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import type { ClassifyQuestionAgentItemType } from '@fastgpt/global/core/module/type.d';
import { ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { Prompt_CQJson } from '@/global/core/prompt/agent';
@@ -16,6 +17,13 @@ import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { ModelTypeEnum, getLLMModel } from '@fastgpt/service/core/ai/model';
import { getHistories } from '../utils';
import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import {
ChatCompletionCreateParams,
ChatCompletionMessageParam,
ChatCompletionTool
} from '@fastgpt/global/core/ai/type';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
type Props = ModuleDispatchProps<{
[ModuleInputKeyEnum.aiModel]: string;
@@ -24,9 +32,10 @@ type Props = ModuleDispatchProps<{
[ModuleInputKeyEnum.userChatInput]: string;
[ModuleInputKeyEnum.agents]: ClassifyQuestionAgentItemType[];
}>;
type CQResponse = ModuleDispatchResponse<{
type CQResponse = DispatchNodeResultType<{
[key: string]: any;
}>;
type ActionProps = Props & { cqModel: LLMModelItemType };
const agentFunName = 'classify_question';
@@ -55,6 +64,13 @@ export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse
cqModel
});
}
if (cqModel.functionCall) {
return functionCall({
...props,
histories: chatHistories,
cqModel
});
}
return completions({
...props,
histories: chatHistories,
@@ -72,7 +88,7 @@ export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse
return {
[result.key]: true,
[ModuleOutputKeyEnum.responseData]: {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
query: userChatInput,
@@ -81,7 +97,7 @@ export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse
cqResult: result.value,
contextTotalLen: chatHistories.length + 2
},
[ModuleOutputKeyEnum.moduleDispatchBills]: [
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: name,
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
@@ -92,37 +108,43 @@ export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse
};
};
async function toolChoice({
user,
const getFunctionCallSchema = ({
cqModel,
histories,
params: { agents, systemPrompt, userChatInput }
}: Props & { cqModel: LLMModelItemType }) {
}: ActionProps) => {
const messages: ChatItemType[] = [
...histories,
{
obj: ChatRoleEnum.Human,
value: systemPrompt
? `<背景知识>
${systemPrompt}
</背景知识>
问题: "${userChatInput}"
`
: userChatInput
value: [
{
type: ChatItemValueTypeEnum.text,
text: {
content: systemPrompt
? `<背景知识>
${systemPrompt}
</背景知识>
问题: "${userChatInput}"
`
: userChatInput
}
}
]
}
];
const filterMessages = ChatContextFilter({
messages,
const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
const filterMessages = filterGPTMessageByMaxTokens({
messages: adaptMessages,
maxTokens: cqModel.maxContext
});
const adaptMessages = adaptChat2GptMessages({ messages: filterMessages, reserveId: false });
// function body
const agentFunction = {
name: agentFunName,
description: '根据对话记录及背景知识,对问题进行分类,并返回对应的类型字段',
description: '结合对话记录及背景知识,对问题进行分类,并返回对应的类型字段',
parameters: {
type: 'object',
properties: {
@@ -137,7 +159,19 @@ ${systemPrompt}
required: ['type']
}
};
const tools: any = [
return {
agentFunction,
filterMessages
};
};
const toolChoice = async (props: ActionProps) => {
const { user, cqModel } = props;
const { agentFunction, filterMessages } = getFunctionCallSchema(props);
// function body
const tools: ChatCompletionTool[] = [
{
type: 'function',
function: agentFunction
@@ -152,7 +186,7 @@ ${systemPrompt}
const response = await ai.chat.completions.create({
model: cqModel.model,
temperature: 0,
messages: adaptMessages,
messages: filterMessages,
tools,
tool_choice: { type: 'function', function: { name: agentFunName } }
});
@@ -161,13 +195,19 @@ ${systemPrompt}
const arg = JSON.parse(
response?.choices?.[0]?.message?.tool_calls?.[0]?.function?.arguments || ''
);
const completeMessages: ChatCompletionMessageParam[] = [
...filterMessages,
{
role: ChatCompletionRequestMessageRoleEnum.Assistant,
tool_calls: response.choices?.[0]?.message?.tool_calls
}
];
return {
arg,
tokens: countMessagesTokens(messages, tools)
tokens: countGptMessagesTokens(completeMessages, tools)
};
} catch (error) {
console.log(agentFunction.parameters);
console.log(response.choices?.[0]?.message);
console.log('Your model may not support tool_call', error);
@@ -177,25 +217,79 @@ ${systemPrompt}
tokens: 0
};
}
}
};
async function completions({
const functionCall = async (props: ActionProps) => {
const { user, cqModel } = props;
const { agentFunction, filterMessages } = getFunctionCallSchema(props);
const functions: ChatCompletionCreateParams.Function[] = [agentFunction];
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const response = await ai.chat.completions.create({
model: cqModel.model,
temperature: 0,
messages: filterMessages,
function_call: {
name: agentFunName
},
functions
});
try {
const arg = JSON.parse(response?.choices?.[0]?.message?.function_call?.arguments || '');
const completeMessages: ChatCompletionMessageParam[] = [
...filterMessages,
{
role: ChatCompletionRequestMessageRoleEnum.Assistant,
function_call: response.choices?.[0]?.message?.function_call
}
];
return {
arg,
tokens: countGptMessagesTokens(completeMessages, undefined, functions)
};
} catch (error) {
console.log(response.choices?.[0]?.message);
console.log('Your model may not support tool_call', error);
return {
arg: {},
tokens: 0
};
}
};
const completions = async ({
cqModel,
user,
histories,
params: { agents, systemPrompt = '', userChatInput }
}: Props & { cqModel: LLMModelItemType }) {
}: ActionProps) => {
const messages: ChatItemType[] = [
{
obj: ChatRoleEnum.Human,
value: replaceVariable(cqModel.customCQPrompt || Prompt_CQJson, {
systemPrompt: systemPrompt || 'null',
typeList: agents
.map((item) => `{"questionType": "${item.value}", "typeId": "${item.key}"}`)
.join('\n'),
history: histories.map((item) => `${item.obj}:${item.value}`).join('\n'),
question: userChatInput
})
value: [
{
type: ChatItemValueTypeEnum.text,
text: {
content: replaceVariable(cqModel.customCQPrompt || Prompt_CQJson, {
systemPrompt: systemPrompt || 'null',
typeList: agents
.map((item) => `{"questionType": "${item.value}", "typeId": "${item.key}"}`)
.join('\n'),
history: histories.map((item) => `${item.obj}:${item.value}`).join('\n'),
question: userChatInput
})
}
}
]
}
];
@@ -207,7 +301,7 @@ async function completions({
const data = await ai.chat.completions.create({
model: cqModel.model,
temperature: 0.01,
messages: adaptChat2GptMessages({ messages, reserveId: false }),
messages: chats2GPTMessages({ messages, reserveId: false }),
stream: false
});
const answer = data.choices?.[0].message?.content || '';
@@ -219,4 +313,4 @@ async function completions({
tokens: countMessagesTokens(messages),
arg: { type: id }
};
}
};
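
(A hedged aside on the two request styles this dispatcher now branches between, sketched with the stock openai SDK; the diff itself goes through getAIApi, and the model and field values here are illustrative:)

import OpenAI from 'openai';

const openai = new OpenAI();
const schema = {
  name: 'classify_question',
  description: 'Classify the question and return its type field',
  parameters: { type: 'object', properties: { type: { type: 'string' } }, required: ['type'] }
};

// Newer models: `tools` + `tool_choice`, forcing the named function.
await openai.chat.completions.create({
  model: 'gpt-3.5-turbo',
  messages: [{ role: 'user', content: 'How do I reset my password?' }],
  tools: [{ type: 'function', function: schema }],
  tool_choice: { type: 'function', function: { name: schema.name } }
});

// Older models: the deprecated `functions` + `function_call` pair, same schema.
await openai.chat.completions.create({
  model: 'gpt-3.5-turbo',
  messages: [{ role: 'user', content: 'How do I reset my password?' }],
  functions: [schema],
  function_call: { name: schema.name }
});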

View File

@@ -1,14 +1,15 @@
import { adaptChat2GptMessages } from '@fastgpt/global/core/chat/adapt';
import { ChatContextFilter } from '@fastgpt/service/core/chat/utils';
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { filterGPTMessageByMaxTokens } from '@fastgpt/service/core/chat/utils';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import { countMessagesTokens } from '@fastgpt/global/common/string/tiktoken';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import {
countGptMessagesTokens,
countMessagesTokens
} from '@fastgpt/global/common/string/tiktoken';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { getAIApi } from '@fastgpt/service/core/ai/config';
import type {
ContextExtractAgentItemType,
ModuleDispatchResponse
} from '@fastgpt/global/core/module/type';
import type { ContextExtractAgentItemType } from '@fastgpt/global/core/module/type';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { Prompt_ExtractJson } from '@/global/core/prompt/agent';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
@@ -17,6 +18,13 @@ import { getHistories } from '../utils';
import { ModelTypeEnum, getLLMModel } from '@fastgpt/service/core/ai/model';
import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/utils';
import json5 from 'json5';
import {
ChatCompletionCreateParams,
ChatCompletionMessageParam,
ChatCompletionTool
} from '@fastgpt/global/core/ai/type';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
type Props = ModuleDispatchProps<{
[ModuleInputKeyEnum.history]?: ChatItemType[];
@@ -25,12 +33,14 @@ type Props = ModuleDispatchProps<{
[ModuleInputKeyEnum.description]: string;
[ModuleInputKeyEnum.aiModel]: string;
}>;
type Response = ModuleDispatchResponse<{
type Response = DispatchNodeResultType<{
[ModuleOutputKeyEnum.success]?: boolean;
[ModuleOutputKeyEnum.failed]?: boolean;
[ModuleOutputKeyEnum.contextExtractFields]: string;
}>;
type ActionProps = Props & { extractModel: LLMModelItemType };
const agentFunName = 'extract_json_data';
export async function dispatchContentExtract(props: Props): Promise<Response> {
@@ -56,6 +66,13 @@ export async function dispatchContentExtract(props: Props): Promise<Response> {
extractModel
});
}
if (extractModel.functionCall) {
return functionCall({
...props,
histories: chatHistories,
extractModel
});
}
return completions({
...props,
histories: chatHistories,
@@ -105,7 +122,7 @@ export async function dispatchContentExtract(props: Props): Promise<Response> {
[ModuleOutputKeyEnum.failed]: success ? undefined : true,
[ModuleOutputKeyEnum.contextExtractFields]: JSON.stringify(arg),
...arg,
[ModuleOutputKeyEnum.responseData]: {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
query: content,
@@ -114,7 +131,7 @@ export async function dispatchContentExtract(props: Props): Promise<Response> {
extractResult: arg,
contextTotalLen: chatHistories.length + 2
},
[ModuleOutputKeyEnum.moduleDispatchBills]: [
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: name,
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
@@ -125,30 +142,36 @@ export async function dispatchContentExtract(props: Props): Promise<Response> {
};
}
async function toolChoice({
const getFunctionCallSchema = ({
extractModel,
user,
histories,
params: { content, extractKeys, description }
}: Props & { extractModel: LLMModelItemType }) {
}: ActionProps) => {
const messages: ChatItemType[] = [
...histories,
{
obj: ChatRoleEnum.Human,
value: `你的任务是根据上下文获取适当的 JSON 字符串。要求:
"""
- 字符串不要换行。
- 结合上下文和当前问题进行获取。
"""
当前问题: "${content}"`
value: [
{
type: ChatItemValueTypeEnum.text,
text: {
content: `你的任务是根据上下文获取适当的 JSON 字符串。要求:
"""
- 字符串不要换行。
- 结合上下文和当前问题进行获取。
"""
当前问题: "${content}"`
}
}
]
}
];
const filterMessages = ChatContextFilter({
messages,
const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
const filterMessages = filterGPTMessageByMaxTokens({
messages: adaptMessages,
maxTokens: extractModel.maxContext
});
const adaptMessages = adaptChat2GptMessages({ messages: filterMessages, reserveId: false });
const properties: Record<
string,
@@ -164,7 +187,6 @@ async function toolChoice({
...(item.enum ? { enum: item.enum.split('\n') } : {})
};
});
// function body
const agentFunction = {
name: agentFunName,
@@ -174,7 +196,19 @@ async function toolChoice({
properties
}
};
const tools: any = [
return {
filterMessages,
agentFunction
};
};
const toolChoice = async (props: ActionProps) => {
const { user, extractModel } = props;
const { filterMessages, agentFunction } = getFunctionCallSchema(props);
const tools: ChatCompletionTool[] = [
{
type: 'function',
function: agentFunction
@@ -189,7 +223,7 @@ async function toolChoice({
const response = await ai.chat.completions.create({
model: extractModel.model,
temperature: 0,
messages: [...adaptMessages],
messages: filterMessages,
tools,
tool_choice: { type: 'function', function: { name: agentFunName } }
});
@@ -207,35 +241,96 @@ async function toolChoice({
}
})();
const completeMessages: ChatCompletionMessageParam[] = [
...filterMessages,
{
role: ChatCompletionRequestMessageRoleEnum.Assistant,
tool_calls: response.choices?.[0]?.message?.tool_calls
}
];
return {
rawResponse: response?.choices?.[0]?.message?.tool_calls?.[0]?.function?.arguments || '',
tokens: countMessagesTokens(messages, tools),
tokens: countGptMessagesTokens(completeMessages, tools),
arg
};
}
};
async function completions({
const functionCall = async (props: ActionProps) => {
const { user, extractModel } = props;
const { agentFunction, filterMessages } = getFunctionCallSchema(props);
const functions: ChatCompletionCreateParams.Function[] = [agentFunction];
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const response = await ai.chat.completions.create({
model: extractModel.model,
temperature: 0,
messages: filterMessages,
function_call: {
name: agentFunName
},
functions
});
try {
const arg = JSON.parse(response?.choices?.[0]?.message?.function_call?.arguments || '');
const completeMessages: ChatCompletionMessageParam[] = [
...filterMessages,
{
role: ChatCompletionRequestMessageRoleEnum.Assistant,
function_call: response.choices?.[0]?.message?.function_call
}
];
return {
arg,
tokens: countGptMessagesTokens(completeMessages, undefined, functions)
};
} catch (error) {
console.log(response.choices?.[0]?.message);
console.log('Your model may not support tool_call', error);
return {
arg: {},
tokens: 0
};
}
};
const completions = async ({
extractModel,
user,
histories,
params: { content, extractKeys, description }
}: Props & { extractModel: LLMModelItemType }) {
}: ActionProps) => {
const messages: ChatItemType[] = [
{
obj: ChatRoleEnum.Human,
value: replaceVariable(extractModel.customExtractPrompt || Prompt_ExtractJson, {
description,
json: extractKeys
.map(
(item) =>
`{"key":"${item.key}", "description":"${item.desc}"${
item.enum ? `, "enum":"[${item.enum.split('\n')}]"` : ''
}}`
)
.join('\n'),
text: `${histories.map((item) => `${item.obj}:${item.value}`).join('\n')}
Human: ${content}`
})
value: [
{
type: ChatItemValueTypeEnum.text,
text: {
content: replaceVariable(extractModel.customExtractPrompt || Prompt_ExtractJson, {
description,
json: extractKeys
.map(
(item) =>
`{"key":"${item.key}", "description":"${item.desc}"${
item.enum ? `, "enum":"[${item.enum.split('\n')}]"` : ''
}}`
)
.join('\n'),
text: `${histories.map((item) => `${item.obj}:${item.value}`).join('\n')}
Human: ${content}`
})
}
}
]
}
];
@@ -246,7 +341,7 @@ Human: ${content}`
const data = await ai.chat.completions.create({
model: extractModel.model,
temperature: 0.01,
messages: adaptChat2GptMessages({ messages, reserveId: false }),
messages: chats2GPTMessages({ messages, reserveId: false }),
stream: false
});
const answer = data.choices?.[0].message?.content || '';
@@ -276,4 +371,4 @@ Human: ${content}`
arg: {}
};
}
}
};
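
(On the json5 import above: a small sketch, with an illustrative string, of why a lenient parser helps with model-produced arguments where plain JSON.parse would fail:)

import json5 from 'json5';

// Models sometimes emit near-JSON: unquoted keys, single quotes, trailing commas.
const sloppy = "{name: 'Alice', age: 30,}";
// JSON.parse(sloppy) throws; json5 accepts the JSON5 superset.
const parsed = json5.parse(sloppy); // => { name: 'Alice', age: 30 }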

View File

@@ -0,0 +1,359 @@
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getAIApi } from '@fastgpt/service/core/ai/config';
import { filterGPTMessageByMaxTokens } from '@fastgpt/service/core/chat/utils';
import {
ChatCompletion,
StreamChatType,
ChatCompletionMessageParam,
ChatCompletionCreateParams,
ChatCompletionMessageFunctionCall,
ChatCompletionFunctionMessageParam,
ChatCompletionAssistantMessageParam
} from '@fastgpt/global/core/ai/type';
import { NextApiResponse } from 'next';
import {
responseWrite,
responseWriteController,
responseWriteNodeStatus
} from '@fastgpt/service/common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { dispatchWorkFlow } from '../../index';
import { DispatchToolModuleProps, RunToolResponse, ToolModuleItemType } from './type.d';
import json5 from 'json5';
import { DispatchFlowResponse } from '../../type';
import { countGptMessagesTokens } from '@fastgpt/global/common/string/tiktoken';
import { getNanoid } from '@fastgpt/global/common/string/tools';
type ToolRunResponseType = {
moduleRunResponse: DispatchFlowResponse;
functionCallMsg: ChatCompletionFunctionMessageParam;
}[];
export const runToolWithFunctionCall = async (
props: DispatchToolModuleProps & {
messages: ChatCompletionMessageParam[];
toolModules: ToolModuleItemType[];
toolModel: LLMModelItemType;
},
response?: RunToolResponse
): Promise<RunToolResponse> => {
const {
toolModel,
toolModules,
messages,
res,
runtimeModules,
detail = false,
module,
stream
} = props;
const functions: ChatCompletionCreateParams.Function[] = toolModules.map((module) => {
const properties: Record<
string,
{
type: string;
description: string;
required?: boolean;
}
> = {};
module.toolParams.forEach((item) => {
properties[item.key] = {
type: 'string',
description: item.toolDescription || ''
};
});
return {
name: module.moduleId,
description: module.intro,
parameters: {
type: 'object',
properties,
required: module.toolParams.filter((item) => item.required).map((item) => item.key)
}
};
});
const filterMessages = filterGPTMessageByMaxTokens({
messages,
maxTokens: toolModel.maxContext - 500 // headroom for the context filter; not the response max_tokens
});
/* Run llm */
const ai = getAIApi({
timeout: 480000
});
const aiResponse = await ai.chat.completions.create(
{
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: 0,
stream,
messages: filterMessages,
functions,
function_call: 'auto'
},
{
headers: {
Accept: 'application/json, text/plain, */*'
}
}
);
const { answer, functionCalls } = await (async () => {
if (stream) {
return streamResponse({
res,
detail,
toolModules,
stream: aiResponse
});
} else {
const result = aiResponse as ChatCompletion;
const function_call = result.choices?.[0]?.message?.function_call;
const toolModule = toolModules.find((module) => module.moduleId === function_call?.name);
const toolCalls = function_call
? [
{
...function_call,
id: getNanoid(),
toolName: toolModule?.name,
toolAvatar: toolModule?.avatar
}
]
: [];
return {
answer: result.choices?.[0]?.message?.content || '',
functionCalls: toolCalls
};
}
})();
// Run the selected tool.
const toolsRunResponse = (
await Promise.all(
functionCalls.map(async (tool) => {
if (!tool) return;
const toolModule = toolModules.find((module) => module.moduleId === tool.name);
if (!toolModule) return;
const startParams = (() => {
try {
return json5.parse(tool.arguments);
} catch (error) {
return {};
}
})();
const moduleRunResponse = await dispatchWorkFlow({
...props,
runtimeModules: runtimeModules.map((module) => ({
...module,
isEntry: module.moduleId === toolModule.moduleId
})),
startParams
});
const functionCallMsg: ChatCompletionFunctionMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Function,
name: tool.name,
content: JSON.stringify(moduleRunResponse.toolResponses, null, 2)
};
if (stream && detail) {
responseWrite({
res,
event: SseResponseEventEnum.toolResponse,
data: JSON.stringify({
tool: {
id: tool.id,
toolName: '',
toolAvatar: '',
params: '',
response: JSON.stringify(moduleRunResponse.toolResponses, null, 2)
}
})
});
}
return {
moduleRunResponse,
functionCallMsg
};
})
)
).filter(Boolean) as ToolRunResponseType;
const flatToolsResponseData = toolsRunResponse.map((item) => item.moduleRunResponse).flat();
const functionCall = functionCalls[0];
if (functionCall && !res.closed) {
// Run the tool, combine its results, and perform another round of AI calls
const assistantToolMsgParams: ChatCompletionAssistantMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
function_call: functionCall
};
const concatToolMessages = [
...filterMessages,
assistantToolMsgParams
] as ChatCompletionMessageParam[];
const tokens = countGptMessagesTokens(concatToolMessages, undefined, functions);
// console.log(tokens, 'tool');
if (stream && detail) {
responseWriteNodeStatus({
res,
name: module.name
});
}
return runToolWithFunctionCall(
{
...props,
messages: [...concatToolMessages, ...toolsRunResponse.map((item) => item?.functionCallMsg)]
},
{
dispatchFlowResponse: response
? response.dispatchFlowResponse.concat(flatToolsResponseData)
: flatToolsResponseData,
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens
}
);
} else {
// No tool is invoked, indicating that the process is over
const completeMessages = filterMessages.concat({
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: answer
});
const tokens = countGptMessagesTokens(completeMessages, undefined, functions);
// console.log(tokens, 'response token');
return {
dispatchFlowResponse: response?.dispatchFlowResponse || [],
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
completeMessages
};
}
};
async function streamResponse({
res,
detail,
toolModules,
stream
}: {
res: NextApiResponse;
detail: boolean;
toolModules: ToolModuleItemType[];
stream: StreamChatType;
}) {
const write = responseWriteController({
res,
readStream: stream
});
let textAnswer = '';
let functionCalls: ChatCompletionMessageFunctionCall[] = [];
let functionId = getNanoid();
for await (const part of stream) {
if (res.closed) {
stream.controller?.abort();
break;
}
const responseChoice = part.choices?.[0]?.delta;
if (responseChoice.content) {
const content = responseChoice?.content || '';
textAnswer += content;
responseWrite({
write,
event: detail ? SseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({
text: content
})
});
} else if (responseChoice.function_call) {
const functionCall: {
arguments: string;
name?: string;
} = responseChoice.function_call;
// In a streamed response, only one function is returned per delta; a name field marks the start of a function call
if (functionCall?.name) {
functionId = getNanoid();
const toolModule = toolModules.find((module) => module.moduleId === functionCall?.name);
if (toolModule) {
if (functionCall?.arguments === undefined) {
functionCall.arguments = '';
}
functionCalls.push({
...functionCall,
id: functionId,
name: functionCall.name,
toolName: toolModule.name,
toolAvatar: toolModule.avatar
});
if (detail) {
responseWrite({
write,
event: SseResponseEventEnum.toolCall,
data: JSON.stringify({
tool: {
id: functionId,
toolName: toolModule.name,
toolAvatar: toolModule.avatar,
functionName: functionCall.name,
params: functionCall.arguments,
response: ''
}
})
});
}
}
}
/* Append the argument fragment to the last tool call */
const arg: string = functionCall?.arguments || '';
const currentTool = functionCalls[functionCalls.length - 1];
if (currentTool) {
currentTool.arguments += arg;
if (detail) {
responseWrite({
write,
event: SseResponseEventEnum.toolParams,
data: JSON.stringify({
tool: {
id: functionId,
toolName: '',
toolAvatar: '',
params: arg,
response: ''
}
})
});
}
}
}
}
if (!textAnswer && functionCalls.length === 0) {
return Promise.reject('LLM api response empty');
}
return { answer: textAnswer, functionCalls };
}
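
(A minimal sketch of the streaming delta format this loop consumes, per the OpenAI streaming protocol: the first chunk of a call carries `name`, later chunks only append `arguments` fragments. Values are illustrative.)

type FunctionCallDelta = { name?: string; arguments?: string };

const deltas: FunctionCallDelta[] = [
  { name: 'search', arguments: '' },
  { arguments: '{"que' },
  { arguments: 'ry":"hi"}' }
];

// Fold the fragments into one complete call, as the loop above does incrementally.
const call = deltas.reduce(
  (acc, d) => ({ name: acc.name ?? d.name, arguments: acc.arguments + (d.arguments ?? '') }),
  { name: undefined as string | undefined, arguments: '' }
);
// call => { name: 'search', arguments: '{"query":"hi"}' }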

View File

@@ -0,0 +1,147 @@
import { ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import type {
DispatchNodeResultType,
RunningModuleItemType
} from '@fastgpt/global/core/module/runtime/type';
import { ModelTypeEnum, getLLMModel } from '@fastgpt/service/core/ai/model';
import { getHistories } from '../../utils';
import { runToolWithToolChoice } from './toolChoice';
import { DispatchToolModuleProps, ToolModuleItemType } from './type.d';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import {
GPTMessages2Chats,
chats2GPTMessages,
getSystemPrompt,
runtimePrompt2ChatsValue
} from '@fastgpt/global/core/chat/adapt';
import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/utils';
import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
import { runToolWithFunctionCall } from './functionCall';
type Response = DispatchNodeResultType<{}>;
export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
const {
module: { name, outputs },
runtimeModules,
histories,
params: { model, systemPrompt, userChatInput, history = 6 }
} = props;
const toolModel = getLLMModel(model);
const chatHistories = getHistories(history, histories);
/* get tool params */
// get tool output targets
const toolOutput = outputs.find((output) => output.key === ModuleOutputKeyEnum.selectedTools);
if (!toolOutput) {
return Promise.reject('No tool output found');
}
const targets = toolOutput.targets;
// Gets the module to which the tool is connected
const toolModules = targets
.map((item) => {
const tool = runtimeModules.find((module) => module.moduleId === item.moduleId);
return tool;
})
.filter(Boolean)
.map<ToolModuleItemType>((tool) => {
const toolParams = tool?.inputs.filter((input) => !!input.toolDescription) || [];
return {
...(tool as RunningModuleItemType),
toolParams
};
});
const messages: ChatItemType[] = [
...getSystemPrompt(systemPrompt),
...chatHistories,
{
obj: ChatRoleEnum.Human,
value: runtimePrompt2ChatsValue({
text: userChatInput,
files: []
})
}
];
const {
dispatchFlowResponse,
totalTokens,
completeMessages = []
} = await (async () => {
if (toolModel.toolChoice) {
return runToolWithToolChoice({
...props,
toolModules,
toolModel,
messages: chats2GPTMessages({ messages, reserveId: false })
});
}
if (toolModel.functionCall) {
return runToolWithFunctionCall({
...props,
toolModules,
toolModel,
messages: chats2GPTMessages({ messages, reserveId: false })
});
}
return {
dispatchFlowResponse: [],
totalTokens: 0,
completeMessages: []
};
})();
const { totalPoints, modelName } = formatModelChars2Points({
model,
tokens: totalTokens,
modelType: ModelTypeEnum.llm
});
const adaptMessages = GPTMessages2Chats(completeMessages);
//@ts-ignore
const startIndex = adaptMessages.findLastIndex((item) => item.obj === ChatRoleEnum.Human);
const assistantResponse = adaptMessages.slice(startIndex + 1);
// flat child tool response
const childToolResponse = dispatchFlowResponse.map((item) => item.flowResponses).flat();
// concat tool usage
const totalPointsUsage =
totalPoints +
dispatchFlowResponse.reduce((sum, item) => {
const childrenTotal = item.flowUsages.reduce((sum, item) => sum + item.totalPoints, 0);
return sum + childrenTotal;
}, 0);
const flatUsages = dispatchFlowResponse.map((item) => item.flowUsages).flat();
return {
[DispatchNodeResponseKeyEnum.assistantResponses]: assistantResponse
.map((item) => item.value)
.flat(),
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: totalPointsUsage,
toolCallTokens: totalTokens,
model: modelName,
query: userChatInput,
historyPreview: getHistoryPreview(GPTMessages2Chats(completeMessages, false)),
toolDetail: childToolResponse
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: name,
totalPoints,
model: modelName,
tokens: totalTokens
},
...flatUsages
]
};
};
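
(On the @ts-ignore above: Array.prototype.findLastIndex is an ES2023 addition, so older lib targets lack its typing. A sketch of an equivalent helper under that assumption:)

const findLastIndexCompat = <T>(arr: T[], predicate: (item: T) => boolean): number => {
  for (let i = arr.length - 1; i >= 0; i--) {
    if (predicate(arr[i])) return i;
  }
  return -1;
};

// e.g. locate the last Human message in the adapted history:
// findLastIndexCompat(adaptMessages, (item) => item.obj === ChatRoleEnum.Human);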

View File

@@ -0,0 +1,371 @@
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getAIApi } from '@fastgpt/service/core/ai/config';
import { filterGPTMessageByMaxTokens } from '@fastgpt/service/core/chat/utils';
import {
ChatCompletion,
ChatCompletionMessageToolCall,
StreamChatType,
ChatCompletionToolMessageParam,
ChatCompletionAssistantToolParam,
ChatCompletionMessageParam,
ChatCompletionTool
} from '@fastgpt/global/core/ai/type';
import { NextApiResponse } from 'next';
import {
responseWrite,
responseWriteController,
responseWriteNodeStatus
} from '@fastgpt/service/common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { dispatchWorkFlow } from '../../index';
import { DispatchToolModuleProps, RunToolResponse, ToolModuleItemType } from './type.d';
import json5 from 'json5';
import { DispatchFlowResponse } from '../../type';
import { countGptMessagesTokens } from '@fastgpt/global/common/string/tiktoken';
type ToolRunResponseType = {
moduleRunResponse: DispatchFlowResponse;
toolMsgParams: ChatCompletionToolMessageParam;
}[];
export const runToolWithToolChoice = async (
props: DispatchToolModuleProps & {
messages: ChatCompletionMessageParam[];
toolModules: ToolModuleItemType[];
toolModel: LLMModelItemType;
},
response?: RunToolResponse
): Promise<RunToolResponse> => {
const {
toolModel,
toolModules,
messages,
res,
runtimeModules,
detail = false,
module,
stream
} = props;
const tools: ChatCompletionTool[] = toolModules.map((module) => {
const properties: Record<
string,
{
type: string;
description: string;
required?: boolean;
}
> = {};
module.toolParams.forEach((item) => {
properties[item.key] = {
type: 'string',
description: item.toolDescription || ''
};
});
return {
type: 'function',
function: {
name: module.moduleId,
description: module.intro,
parameters: {
type: 'object',
properties,
required: module.toolParams.filter((item) => item.required).map((item) => item.key)
}
}
};
});
const filterMessages = filterGPTMessageByMaxTokens({
messages,
maxTokens: toolModel.maxContext - 300 // headroom for the context filter; not the response max_tokens
});
/* Run llm */
const ai = getAIApi({
timeout: 480000
});
const aiResponse = await ai.chat.completions.create(
{
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: 0,
stream,
messages: filterMessages,
tools,
tool_choice: 'auto'
},
{
headers: {
Accept: 'application/json, text/plain, */*'
}
}
);
const { answer, toolCalls } = await (async () => {
if (stream) {
return streamResponse({
res,
detail,
toolModules,
stream: aiResponse
});
} else {
const result = aiResponse as ChatCompletion;
const calls = result.choices?.[0]?.message?.tool_calls || [];
// Attach the tool name and avatar
const toolCalls = calls.map((tool) => {
const toolModule = toolModules.find((module) => module.moduleId === tool.function?.name);
return {
...tool,
toolName: toolModule?.name || '',
toolAvatar: toolModule?.avatar || ''
};
});
return {
answer: result.choices?.[0]?.message?.content || '',
toolCalls: toolCalls
};
}
})();
// Run the selected tool.
const toolsRunResponse = (
await Promise.all(
toolCalls.map(async (tool) => {
const toolModule = toolModules.find((module) => module.moduleId === tool.function?.name);
if (!toolModule) return;
const startParams = (() => {
try {
return json5.parse(tool.function.arguments);
} catch (error) {
return {};
}
})();
const moduleRunResponse = await dispatchWorkFlow({
...props,
runtimeModules: runtimeModules.map((module) => ({
...module,
isEntry: module.moduleId === toolModule.moduleId
})),
startParams
});
const toolMsgParams: ChatCompletionToolMessageParam = {
tool_call_id: tool.id,
role: ChatCompletionRequestMessageRoleEnum.Tool,
name: tool.function.name,
content: JSON.stringify(moduleRunResponse.toolResponses, null, 2)
};
if (stream && detail) {
responseWrite({
res,
event: SseResponseEventEnum.toolResponse,
data: JSON.stringify({
tool: {
id: tool.id,
toolName: '',
toolAvatar: '',
params: '',
response: JSON.stringify(moduleRunResponse.toolResponses, null, 2)
}
})
});
}
return {
moduleRunResponse,
toolMsgParams
};
})
)
).filter(Boolean) as ToolRunResponseType;
const flatToolsResponseData = toolsRunResponse.map((item) => item.moduleRunResponse).flat();
if (toolCalls.length > 0 && !res.closed) {
// Run the tool, combine its results, and perform another round of AI calls
const assistantToolMsgParams: ChatCompletionAssistantToolParam = {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
tool_calls: toolCalls
};
const concatToolMessages = [
...filterMessages,
assistantToolMsgParams
] as ChatCompletionMessageParam[];
const tokens = countGptMessagesTokens(concatToolMessages, tools);
// console.log(
// JSON.stringify(
// {
// messages: concatToolMessages,
// tools
// },
// null,
// 2
// )
// );
// console.log(tokens, 'tool');
if (stream && detail) {
responseWriteNodeStatus({
res,
name: module.name
});
}
return runToolWithToolChoice(
{
...props,
messages: [...concatToolMessages, ...toolsRunResponse.map((item) => item?.toolMsgParams)]
},
{
dispatchFlowResponse: response
? response.dispatchFlowResponse.concat(flatToolsResponseData)
: flatToolsResponseData,
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens
}
);
} else {
// No tool is invoked, indicating that the process is over
const completeMessages = filterMessages.concat({
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: answer
});
const tokens = countGptMessagesTokens(completeMessages, tools);
// console.log(
// JSON.stringify(
// {
// messages: completeMessages,
// tools
// },
// null,
// 2
// )
// );
// console.log(tokens, 'response token');
return {
dispatchFlowResponse: response?.dispatchFlowResponse || [],
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
completeMessages
};
}
};
async function streamResponse({
res,
detail,
toolModules,
stream
}: {
res: NextApiResponse;
detail: boolean;
toolModules: ToolModuleItemType[];
stream: StreamChatType;
}) {
const write = responseWriteController({
res,
readStream: stream
});
let textAnswer = '';
let toolCalls: ChatCompletionMessageToolCall[] = [];
for await (const part of stream) {
if (res.closed) {
stream.controller?.abort();
break;
}
const responseChoice = part.choices?.[0]?.delta;
// console.log(JSON.stringify(responseChoice, null, 2));
if (responseChoice.content) {
const content = responseChoice?.content || '';
textAnswer += content;
responseWrite({
write,
event: detail ? SseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({
text: content
})
});
} else if (responseChoice.tool_calls?.[0]) {
const toolCall: ChatCompletionMessageToolCall = responseChoice.tool_calls[0];
// In a streamed response, only one tool is returned per delta; an id marks the start of a tool call
if (toolCall.id) {
const toolModule = toolModules.find(
(module) => module.moduleId === toolCall.function?.name
);
if (toolModule) {
if (toolCall.function?.arguments === undefined) {
toolCall.function.arguments = '';
}
toolCalls.push({
...toolCall,
toolName: toolModule.name,
toolAvatar: toolModule.avatar
});
if (detail) {
responseWrite({
write,
event: SseResponseEventEnum.toolCall,
data: JSON.stringify({
tool: {
id: toolCall.id,
toolName: toolModule.name,
toolAvatar: toolModule.avatar,
functionName: toolCall.function.name,
params: toolCall.function.arguments,
response: ''
}
})
});
}
}
}
/* Append the argument fragment to the last tool call */
const arg: string = responseChoice.tool_calls?.[0]?.function?.arguments;
const currentTool = toolCalls[toolCalls.length - 1];
if (currentTool) {
currentTool.function.arguments += arg;
if (detail) {
responseWrite({
write,
event: SseResponseEventEnum.toolParams,
data: JSON.stringify({
tool: {
id: currentTool.id,
toolName: '',
toolAvatar: '',
params: arg,
response: ''
}
})
});
}
}
}
}
if (!textAnswer && toolCalls.length === 0) {
return Promise.reject('LLM api response empty');
}
return { answer: textAnswer, toolCalls };
}
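
(For reference, the message sequence one tool round-trip appends, using OpenAI chat shapes; the id and values are illustrative:)

import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';

const roundTrip: ChatCompletionMessageParam[] = [
  { role: 'user', content: 'What is the weather in Paris?' },
  {
    role: 'assistant',
    tool_calls: [
      { id: 'call_1', type: 'function', function: { name: 'getWeather', arguments: '{"city":"Paris"}' } }
    ]
  },
  // The tool reply must echo the matching tool_call_id.
  { role: 'tool', tool_call_id: 'call_1', content: '{"temp_c": 21}' }
];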

View File

@@ -0,0 +1,26 @@
import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { FlowNodeInputItemType } from '@fastgpt/global/core/module/node/type';
import type {
ModuleDispatchProps,
DispatchNodeResponseType
} from '@fastgpt/global/core/module/type.d';
import type { RunningModuleItemType } from '@fastgpt/global/core/module/runtime/type';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type { DispatchFlowResponse } from '../../type.d';
export type DispatchToolModuleProps = ModuleDispatchProps<{
[ModuleInputKeyEnum.history]?: ChatItemType[];
[ModuleInputKeyEnum.aiModel]: string;
[ModuleInputKeyEnum.aiSystemPrompt]: string;
[ModuleInputKeyEnum.userChatInput]: string;
}>;
export type RunToolResponse = {
dispatchFlowResponse: DispatchFlowResponse[];
totalTokens: number;
completeMessages?: ChatCompletionMessageParam[];
};
export type ToolModuleItemType = RunningModuleItemType & {
toolParams: RunningModuleItemType['inputs'];
};

View File

@@ -1,18 +1,35 @@
import type { NextApiResponse } from 'next';
import { ChatContextFilter } from '@fastgpt/service/core/chat/utils';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import {
filterGPTMessageByMaxTokens,
formatGPTMessagesInRequestBefore,
loadChatImgToBase64
} from '@fastgpt/service/core/chat/utils';
import type { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type.d';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { sseResponseEventEnum } from '@fastgpt/service/common/response/constant';
import { textAdaptGptResponse } from '@/utils/adapt';
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import { getAIApi } from '@fastgpt/service/core/ai/config';
import type { ChatCompletion, StreamChatType } from '@fastgpt/global/core/ai/type.d';
import type {
ChatCompletion,
ChatCompletionMessageParam,
StreamChatType
} from '@fastgpt/global/core/ai/type.d';
import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/utils';
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { postTextCensor } from '@/service/common/censor';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constant';
import type { ModuleDispatchResponse, ModuleItemType } from '@fastgpt/global/core/module/type.d';
import { countMessagesTokens } from '@fastgpt/global/common/string/tiktoken';
import { adaptChat2GptMessages } from '@fastgpt/global/core/chat/adapt';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import type { ModuleItemType } from '@fastgpt/global/core/module/type.d';
import type { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
import {
countGptMessagesTokens,
countMessagesTokens
} from '@fastgpt/global/common/string/tiktoken';
import {
chats2GPTMessages,
getSystemPrompt,
GPTMessages2Chats,
runtimePrompt2ChatsValue
} from '@fastgpt/global/core/chat/adapt';
import { Prompt_QuotePromptList, Prompt_QuoteTemplateList } from '@/global/core/prompt/AIChat';
import type { AIChatModuleProps } from '@fastgpt/global/core/module/node/type.d';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
@@ -20,10 +37,11 @@ import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { responseWrite, responseWriteController } from '@fastgpt/service/common/response';
import { getLLMModel, ModelTypeEnum } from '@fastgpt/service/core/ai/model';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import { formatStr2ChatContent } from '@fastgpt/service/core/chat/utils';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { getHistories } from '../utils';
import { filterSearchResultsByMaxChars } from '@fastgpt/global/core/dataset/search/utils';
import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
export type ChatProps = ModuleDispatchProps<
AIChatModuleProps & {
@@ -32,7 +50,7 @@ export type ChatProps = ModuleDispatchProps<
[ModuleInputKeyEnum.aiChatDatasetQuote]?: SearchDataResponseItemType[];
}
>;
export type ChatResponse = ModuleDispatchResponse<{
export type ChatResponse = DispatchNodeResultType<{
[ModuleOutputKeyEnum.answerText]: string;
[ModuleOutputKeyEnum.history]: ChatItemType[];
}>;
@@ -46,6 +64,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
user,
histories,
module: { name, outputs },
inputFiles = [],
params: {
model,
temperature = 0,
@@ -59,10 +78,9 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
quotePrompt
}
} = props;
if (!userChatInput) {
if (!userChatInput && inputFiles.length === 0) {
return Promise.reject('Question is empty');
}
stream = stream && isResponseAnswerText;
const chatHistories = getHistories(history, histories);
@@ -74,7 +92,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
return Promise.reject('The chat model is undefined, you need to select a chat model.');
}
const { filterQuoteQA, quoteText } = filterQuote({
const { quoteText } = filterQuote({
quoteQA,
model: modelConstantsData,
quoteTemplate
@@ -90,14 +108,16 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
});
}
const { messages, filterMessages } = getChatMessages({
const { filterMessages } = getChatMessages({
model: modelConstantsData,
histories: chatHistories,
quoteText,
quotePrompt,
userChatInput,
inputFiles,
systemPrompt
});
const { max_tokens } = await getMaxTokens({
model: modelConstantsData,
maxToken,
@@ -121,20 +141,26 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
}
]
: []),
...(await Promise.all(
messages.map(async (item) => ({
...item,
content: modelConstantsData.vision
? await formatStr2ChatContent(item.content)
: item.content
}))
))
];
...formatGPTMessagesInRequestBefore(filterMessages)
] as ChatCompletionMessageParam[];
if (concatMessages.length === 0) {
return Promise.reject('core.chat.error.Messages empty');
}
const loadMessages = await Promise.all(
concatMessages.map(async (item) => {
if (item.role === ChatCompletionRequestMessageRoleEnum.User) {
return {
...item,
content: await loadChatImgToBase64(item.content)
};
} else {
return item;
}
})
);
const response = await ai.chat.completions.create(
{
...modelConstantsData?.defaultConfig,
@@ -142,7 +168,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
temperature,
max_tokens,
stream,
messages: concatMessages
messages: loadMessages
},
{
headers: {
@@ -151,7 +177,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
}
);
const { answerText, completeMessages } = await (async () => {
const { answerText } = await (async () => {
if (stream) {
// sse response
const { answer } = await streamResponse({
@@ -159,35 +185,29 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
detail,
stream: response
});
// count tokens
const completeMessages = filterMessages.concat({
obj: ChatRoleEnum.AI,
value: answer
});
targetResponse({ res, detail, outputs });
return {
answerText: answer,
completeMessages
answerText: answer
};
} else {
const unStreamResponse = response as ChatCompletion;
const answer = unStreamResponse.choices?.[0]?.message?.content || '';
const completeMessages = filterMessages.concat({
obj: ChatRoleEnum.AI,
value: answer
});
return {
answerText: answer,
completeMessages
answerText: answer
};
}
})();
const tokens = countMessagesTokens(completeMessages);
const completeMessages = filterMessages.concat({
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: answerText
});
const chatCompleteMessages = GPTMessages2Chats(completeMessages);
const tokens = countMessagesTokens(chatCompleteMessages);
const { totalPoints, modelName } = formatModelChars2Points({
model,
tokens,
@@ -196,17 +216,16 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
return {
answerText,
[ModuleOutputKeyEnum.responseData]: {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
tokens,
query: `${userChatInput}`,
maxToken: max_tokens,
quoteList: filterQuoteQA,
historyPreview: getHistoryPreview(completeMessages),
historyPreview: getHistoryPreview(chatCompleteMessages),
contextTotalLen: completeMessages.length
},
[ModuleOutputKeyEnum.moduleDispatchBills]: [
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: name,
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
@@ -214,7 +233,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
tokens
}
],
history: completeMessages
history: chatCompleteMessages
};
};
@@ -256,6 +275,7 @@ function getChatMessages({
histories = [],
systemPrompt,
userChatInput,
inputFiles,
model
}: {
quotePrompt?: string;
@@ -263,9 +283,10 @@ function getChatMessages({
histories: ChatItemType[];
systemPrompt: string;
userChatInput: string;
inputFiles: UserChatItemValueItemType['file'][];
model: LLMModelItemType;
}) {
const question = quoteText
const replaceInputValue = quoteText
? replaceVariable(quotePrompt || Prompt_QuotePromptList[0].value, {
quote: quoteText,
question: userChatInput
@@ -273,30 +294,24 @@ function getChatMessages({
: userChatInput;
const messages: ChatItemType[] = [
...(systemPrompt
? [
{
obj: ChatRoleEnum.System,
value: systemPrompt
}
]
: []),
...getSystemPrompt(systemPrompt),
...histories,
{
obj: ChatRoleEnum.Human,
value: question
value: runtimePrompt2ChatsValue({
files: inputFiles,
text: replaceInputValue
})
}
];
const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
const filterMessages = ChatContextFilter({
messages,
const filterMessages = filterGPTMessageByMaxTokens({
messages: adaptMessages,
maxTokens: model.maxContext - 300 // headroom for the context filter; not the response max_tokens
});
const adaptMessages = adaptChat2GptMessages({ messages: filterMessages, reserveId: false });
return {
messages: adaptMessages,
filterMessages
};
}
@@ -307,17 +322,17 @@ function getMaxTokens({
}: {
maxToken: number;
model: LLMModelItemType;
filterMessages: ChatItemType[];
filterMessages: ChatCompletionMessageParam[];
}) {
maxToken = Math.min(maxToken, model.maxResponse);
const tokensLimit = model.maxContext;
/* count response max token */
const promptsToken = countMessagesTokens(filterMessages);
const promptsToken = countGptMessagesTokens(filterMessages);
maxToken = promptsToken + maxToken > tokensLimit ? tokensLimit - promptsToken : maxToken;
if (maxToken <= 0) {
return Promise.reject('Over max token');
maxToken = 200;
}
return {
max_tokens: maxToken
@@ -339,7 +354,7 @@ function targetResponse({
if (targets.length === 0) return;
responseWrite({
res,
event: detail ? sseResponseEventEnum.answer : undefined,
event: detail ? SseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({
text: '\n'
})
@@ -370,7 +385,7 @@ async function streamResponse({
responseWrite({
write,
event: detail ? sseResponseEventEnum.answer : undefined,
event: detail ? SseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({
text: content
})
@@ -383,14 +398,3 @@ async function streamResponse({
return { answer };
}
function getHistoryPreview(completeMessages: ChatItemType[]) {
return completeMessages.map((item, i) => {
if (item.obj === ChatRoleEnum.System) return item;
if (i >= completeMessages.length - 2) return item;
return {
...item,
value: item.value.length > 15 ? `${item.value.slice(0, 15)}...` : item.value
};
});
}
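
(The loadChatImgToBase64 step above presumably inlines image URLs for vision models; a sketch of the multimodal user-content shape it would operate on, per the OpenAI vision format, with an illustrative data URL:)

const userContent = [
  { type: 'text', text: 'What is in this image?' },
  { type: 'image_url', image_url: { url: 'data:image/png;base64,iVBORw0KGgo...' } }
];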

View File

@@ -1,18 +1,19 @@
import type { moduleDispatchResType } from '@fastgpt/global/core/chat/type.d';
import {
DispatchNodeResponseType,
DispatchNodeResultType
} from '@fastgpt/global/core/module/runtime/type.d';
import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/utils';
import type { SelectedDatasetType } from '@fastgpt/global/core/module/api.d';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import type {
ModuleDispatchProps,
ModuleDispatchResponse
} from '@fastgpt/global/core/module/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { ModelTypeEnum, getLLMModel, getVectorModel } from '@fastgpt/service/core/ai/model';
import { searchDatasetData } from '@/service/core/dataset/data/controller';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { DatasetSearchModeEnum } from '@fastgpt/global/core/dataset/constants';
import { getHistories } from '../utils';
import { datasetSearchQueryExtension } from '@fastgpt/service/core/dataset/search/utils';
import { ChatModuleUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { checkTeamReRankPermission } from '@fastgpt/service/support/permission/teamLimit';
type DatasetSearchProps = ModuleDispatchProps<{
@@ -26,7 +27,7 @@ type DatasetSearchProps = ModuleDispatchProps<{
[ModuleInputKeyEnum.datasetSearchExtensionModel]: string;
[ModuleInputKeyEnum.datasetSearchExtensionBg]: string;
}>;
export type DatasetSearchResponse = ModuleDispatchResponse<{
export type DatasetSearchResponse = DispatchNodeResultType<{
[ModuleOutputKeyEnum.datasetIsEmpty]?: boolean;
[ModuleOutputKeyEnum.datasetUnEmpty]?: boolean;
[ModuleOutputKeyEnum.datasetQuoteQA]: SearchDataResponseItemType[];
@@ -107,7 +108,7 @@ export async function dispatchDatasetSearch(
tokens,
modelType: ModelTypeEnum.vector
});
const responseData: moduleDispatchResType & { totalPoints: number } = {
const responseData: DispatchNodeResponseType & { totalPoints: number } = {
totalPoints,
query: concatQueries.join('\n'),
model: modelName,
@@ -115,9 +116,10 @@ export async function dispatchDatasetSearch(
similarity: usingSimilarityFilter ? similarity : undefined,
limit,
searchMode,
searchUsingReRank: searchUsingReRank
searchUsingReRank: searchUsingReRank,
quoteList: searchRes
};
const moduleDispatchBills: ChatModuleUsageType[] = [
const nodeDispatchUsages: ChatNodeUsageType[] = [
{
totalPoints,
moduleName: module.name,
@@ -140,7 +142,7 @@ export async function dispatchDatasetSearch(
aiExtensionResult.extensionQueries?.join('\n') ||
JSON.stringify(aiExtensionResult.extensionQueries);
moduleDispatchBills.push({
nodeDispatchUsages.push({
totalPoints,
moduleName: 'core.module.template.Query extension',
model: modelName,
@@ -152,7 +154,11 @@ export async function dispatchDatasetSearch(
isEmpty: searchRes.length === 0 ? true : undefined,
unEmpty: searchRes.length > 0 ? true : undefined,
quoteQA: searchRes,
responseData,
moduleDispatchBills
[DispatchNodeResponseKeyEnum.nodeResponse]: responseData,
nodeDispatchUsages,
[DispatchNodeResponseKeyEnum.toolResponses]: searchRes.map((item) => ({
text: `${item.q}\n${item.a}`.trim(),
chunkIndex: item.chunkIndex
}))
};
}

View File

@@ -1,16 +1,20 @@
import { NextApiResponse } from 'next';
import { ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import type { ChatDispatchProps, RunningModuleItemType } from '@fastgpt/global/core/module/type.d';
import { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import type { ChatHistoryItemResType } from '@fastgpt/global/core/chat/type.d';
import type { ChatDispatchProps } from '@fastgpt/global/core/module/type.d';
import type { RunningModuleItemType } from '@fastgpt/global/core/module/runtime/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import type {
AIChatItemValueItemType,
ChatHistoryItemResType,
ToolRunResponseItemType
} from '@fastgpt/global/core/chat/type.d';
import { FlowNodeInputTypeEnum, FlowNodeTypeEnum } from '@fastgpt/global/core/module/node/constant';
import { ModuleItemType } from '@fastgpt/global/core/module/type';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { responseWrite } from '@fastgpt/service/common/response';
import { sseResponseEventEnum } from '@fastgpt/service/common/response/constant';
import { responseWriteNodeStatus } from '@fastgpt/service/common/response';
import { getSystemTime } from '@fastgpt/global/common/time/timezone';
import { initRunningModuleType } from '../core/modules/constant';
import { dispatchHistory } from './init/history';
import { dispatchChatInput } from './init/userChatInput';
@@ -27,8 +31,11 @@ import { dispatchQueryExtension } from './tools/queryExternsion';
import { dispatchRunPlugin } from './plugin/run';
import { dispatchPluginInput } from './plugin/runInput';
import { dispatchPluginOutput } from './plugin/runOutput';
import { valueTypeFormat } from './utils';
import { ChatModuleUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { checkTheModuleConnectedByTool, valueTypeFormat } from './utils';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { dispatchRunTools } from './agent/runTool/index';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import { DispatchFlowResponse } from './type';
const callbackMap: Record<`${FlowNodeTypeEnum}`, Function> = {
[FlowNodeTypeEnum.historyNode]: dispatchHistory,
@@ -46,26 +53,29 @@ const callbackMap: Record<`${FlowNodeTypeEnum}`, Function> = {
[FlowNodeTypeEnum.pluginInput]: dispatchPluginInput,
[FlowNodeTypeEnum.pluginOutput]: dispatchPluginOutput,
[FlowNodeTypeEnum.queryExtension]: dispatchQueryExtension,
[FlowNodeTypeEnum.tools]: dispatchRunTools,
// none
[FlowNodeTypeEnum.userGuide]: () => Promise.resolve()
};
/* running */
export async function dispatchModules({
export async function dispatchWorkFlow({
res,
modules,
histories = [],
modules = [],
runtimeModules,
startParams = {},
histories = [],
variables = {},
user,
stream = false,
detail = false,
...props
}: ChatDispatchProps & {
modules: ModuleItemType[];
startParams?: Record<string, any>;
}) {
modules?: ModuleItemType[]; // app modules
runtimeModules?: RunningModuleItemType[];
startParams?: Record<string, any>; // entry module params
}): Promise<DispatchFlowResponse> {
// set sse response headers
if (stream) {
res.setHeader('Content-Type', 'text/event-stream;charset=utf-8');
@@ -78,48 +88,70 @@ export async function dispatchModules({
...getSystemVariable({ timezone: user.timezone }),
...variables
};
const runningModules = loadModules(modules, variables);
const runningModules = runtimeModules ? runtimeModules : loadModules(modules, variables);
// let storeData: Record<string, any> = {}; // after module used
let chatResponse: ChatHistoryItemResType[] = []; // response request and save to database
let chatAnswerText = ''; // AI answer
let chatModuleBills: ChatModuleUsageType[] = [];
let chatResponses: ChatHistoryItemResType[] = []; // node responses to return and save to the database
let chatAssistantResponse: AIChatItemValueItemType[] = []; // The value will be returned to the user
let chatNodeUsages: ChatNodeUsageType[] = [];
let toolRunResponse: ToolRunResponseItemType[] = [];
let runningTime = Date.now();
/* Store special response field */
function pushStore(
{ inputs = [] }: RunningModuleItemType,
{
answerText = '',
responseData,
moduleDispatchBills
nodeDispatchUsages,
toolResponses,
assistantResponses
}: {
answerText?: string;
responseData?: ChatHistoryItemResType | ChatHistoryItemResType[];
moduleDispatchBills?: ChatModuleUsageType[];
[ModuleOutputKeyEnum.answerText]?: string;
[DispatchNodeResponseKeyEnum.nodeResponse]?: ChatHistoryItemResType;
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]?: ChatNodeUsageType[];
[DispatchNodeResponseKeyEnum.toolResponses]?: ToolRunResponseItemType;
[DispatchNodeResponseKeyEnum.assistantResponses]?: AIChatItemValueItemType[]; // tool module, save the response value
}
) {
const time = Date.now();
if (responseData) {
if (Array.isArray(responseData)) {
chatResponse = chatResponse.concat(responseData);
} else {
chatResponse.push({
...responseData,
runningTime: +((time - runningTime) / 1000).toFixed(2)
chatResponses.push({
...responseData,
runningTime: +((time - runningTime) / 1000).toFixed(2)
});
}
if (nodeDispatchUsages) {
chatNodeUsages = chatNodeUsages.concat(nodeDispatchUsages);
}
if (toolResponses) {
if (Array.isArray(toolResponses) && toolResponses.length > 0) {
toolRunResponse.push(toolResponses);
} else if (Object.keys(toolResponses).length > 0) {
toolRunResponse.push(toolResponses);
}
}
if (assistantResponses) {
chatAssistantResponse = chatAssistantResponse.concat(assistantResponses);
}
// save assistant text response
if (answerText) {
const isResponseAnswerText =
inputs.find((item) => item.key === ModuleInputKeyEnum.aiChatIsResponseText)?.value ?? true;
if (isResponseAnswerText) {
chatAssistantResponse.push({
type: ChatItemValueTypeEnum.text,
text: {
content: answerText
}
});
}
}
if (moduleDispatchBills) {
chatModuleBills = chatModuleBills.concat(moduleDispatchBills);
}
runningTime = time;
const isResponseAnswerText =
inputs.find((item) => item.key === ModuleInputKeyEnum.aiChatIsResponseText)?.value ?? true;
if (isResponseAnswerText) {
chatAnswerText += answerText;
}
runningTime = time;
}
/* Inject data into module input */
function moduleInput(module: RunningModuleItemType, data: Record<string, any> = {}) {
const updateInputValue = (key: string, value: any) => {
const index = module.inputs.findIndex((item: any) => item.key === key);
@@ -132,6 +164,7 @@ export async function dispatchModules({
return;
}
/* Pass the output of the module to the next stage */
function moduleOutput(
module: RunningModuleItemType,
result: Record<string, any> = {}
@@ -207,6 +240,7 @@ export async function dispatchModules({
stream,
detail,
module,
runtimeModules: runningModules,
params
};
@@ -218,20 +252,23 @@ export async function dispatchModules({
return {};
})();
// format response data. Add modulename and moduletype
const formatResponseData = (() => {
if (!dispatchRes[ModuleOutputKeyEnum.responseData]) return undefined;
if (Array.isArray(dispatchRes[ModuleOutputKeyEnum.responseData])) {
return dispatchRes[ModuleOutputKeyEnum.responseData];
}
// format response data: add module name and module type
const formatResponseData: ChatHistoryItemResType = (() => {
if (!dispatchRes[DispatchNodeResponseKeyEnum.nodeResponse]) return undefined;
return {
moduleName: module.name,
moduleType: module.flowType,
...dispatchRes[ModuleOutputKeyEnum.responseData]
...dispatchRes[DispatchNodeResponseKeyEnum.nodeResponse]
};
})();
// Add output default value
module.outputs.forEach((item) => {
if (!item.required) return;
if (dispatchRes[item.key] !== undefined) return;
dispatchRes[item.key] = valueTypeFormat(item.defaultValue, item.valueType);
});
// Pass userChatInput
const hasUserChatInputTarget = !!module.outputs.find(
(item) => item.key === ModuleOutputKeyEnum.userChatInput
@@ -243,17 +280,17 @@ export async function dispatchModules({
? params[ModuleOutputKeyEnum.userChatInput]
: undefined,
...dispatchRes,
[ModuleOutputKeyEnum.responseData]: formatResponseData,
[ModuleOutputKeyEnum.moduleDispatchBills]:
dispatchRes[ModuleOutputKeyEnum.moduleDispatchBills]
[DispatchNodeResponseKeyEnum.nodeResponse]: formatResponseData,
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]:
dispatchRes[DispatchNodeResponseKeyEnum.nodeDispatchUsages]
});
}
// start process with initInput
const initModules = runningModules.filter((item) => initRunningModuleType[item.flowType]);
// runningModules.forEach((item) => {
// console.log(item);
// });
const initModules = runningModules.filter((item) => item.isEntry);
// reset entry
modules.forEach((item) => {
item.isEntry = false;
});
initModules.map((module) =>
moduleInput(module, {
@@ -272,9 +309,11 @@ export async function dispatchModules({
}
return {
[ModuleOutputKeyEnum.answerText]: chatAnswerText,
[ModuleOutputKeyEnum.responseData]: chatResponse,
[ModuleOutputKeyEnum.moduleDispatchBills]: chatModuleBills
flowResponses: chatResponses,
flowUsages: chatNodeUsages,
[DispatchNodeResponseKeyEnum.assistantResponses]:
concatAssistantResponseAnswerText(chatAssistantResponse),
[DispatchNodeResponseKeyEnum.toolResponses]: toolRunResponse
};
}
@@ -287,18 +326,36 @@ function loadModules(
.filter((item) => {
return ![FlowNodeTypeEnum.userGuide].includes(item.moduleId as any);
})
.map((module) => {
.map<RunningModuleItemType>((module) => {
return {
moduleId: module.moduleId,
name: module.name,
avatar: module.avatar,
intro: module.intro,
flowType: module.flowType,
showStatus: module.showStatus,
isEntry: module.isEntry,
inputs: module.inputs
.filter(
(item) =>
item.type === FlowNodeInputTypeEnum.systemInput ||
item.connected ||
item.value !== undefined
/*
1. system inputs must be saved
2. inputs connected by a source handle
3. inputs with a manually entered value or a default value
4. for a module connected by a tool, keep the toolDescription input
*/
(item) => {
const isTool = checkTheModuleConnectedByTool(modules, module);
if (isTool && item.toolDescription) {
return true;
}
return (
item.type === FlowNodeInputTypeEnum.systemInput ||
item.connected ||
item.value !== undefined
);
}
) // filter unconnected target input
.map((item) => {
const replace = ['string'].includes(typeof item.value);
@@ -307,12 +364,16 @@ function loadModules(
key: item.key,
// variables replace
value: replace ? replaceVariable(item.value, variables) : item.value,
valueType: item.valueType
valueType: item.valueType,
required: item.required,
toolDescription: item.toolDescription
};
}),
outputs: module.outputs
.map((item) => ({
key: item.key,
required: item.required,
defaultValue: item.defaultValue,
answer: item.key === ModuleOutputKeyEnum.answerText,
value: undefined,
valueType: item.valueType,
@@ -339,13 +400,9 @@ export function responseStatus({
name?: string;
}) {
if (!name) return;
responseWrite({
responseWriteNodeStatus({
res,
event: sseResponseEventEnum.moduleStatus,
data: JSON.stringify({
status: 'running',
name
})
name
});
}
@@ -355,3 +412,22 @@ export function getSystemVariable({ timezone }: { timezone: string }) {
cTime: getSystemTime(timezone)
};
}
export const concatAssistantResponseAnswerText = (response: AIChatItemValueItemType[]) => {
const result: AIChatItemValueItemType[] = [];
// merge consecutive text values
for (let i = 0; i < response.length; i++) {
const item = response[i];
if (item.type === ChatItemValueTypeEnum.text) {
let text = item.text?.content || '';
const lastItem = result[result.length - 1];
if (lastItem && lastItem.type === ChatItemValueTypeEnum.text && lastItem.text?.content) {
lastItem.text.content += text;
continue;
}
}
result.push(item);
}
return result;
};
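The merge above only concatenates runs of adjacent text values; any non-text item (a tool call, for instance) breaks the run. A minimal standalone sketch of that behaviour, using a simplified stand-in for AIChatItemValueItemType (assumed shape, the real type carries more fields):
type SketchValueItem =
  | { type: 'text'; text: { content: string } }
  | { type: 'tool'; tool: Record<string, any> };
const concatTextSketch = (response: SketchValueItem[]): SketchValueItem[] => {
  const result: SketchValueItem[] = [];
  for (const item of response) {
    const last = result[result.length - 1];
    // merge this text chunk into the previous one when both are text
    if (item.type === 'text' && last?.type === 'text' && last.text.content) {
      last.text.content += item.text.content;
      continue;
    }
    result.push(item);
  }
  return result;
};
// two adjacent text chunks collapse into one; the tool item breaks the run
concatTextSketch([
  { type: 'text', text: { content: 'Hello, ' } },
  { type: 'text', text: { content: 'world' } },
  { type: 'tool', tool: {} },
  { type: 'text', text: { content: '!' } }
]);
// => [ text 'Hello, world', tool, text '!' ]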

View File

@@ -1,30 +1,25 @@
import type {
ModuleDispatchProps,
ModuleDispatchResponse
} from '@fastgpt/global/core/module/type.d';
import { dispatchModules } from '../index';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { dispatchWorkFlow } from '../index';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/module/node/constant';
import {
DYNAMIC_INPUT_KEY,
ModuleInputKeyEnum,
ModuleOutputKeyEnum
} from '@fastgpt/global/core/module/constants';
import { DYNAMIC_INPUT_KEY, ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { getPluginRuntimeById } from '@fastgpt/service/core/plugin/controller';
import { authPluginCanUse } from '@fastgpt/service/support/permission/auth/plugin';
import { setEntryEntries } from '../utils';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
type RunPluginProps = ModuleDispatchProps<{
[ModuleInputKeyEnum.pluginId]: string;
[key: string]: any;
}>;
type RunPluginResponse = ModuleDispatchResponse<{
[ModuleOutputKeyEnum.answerText]: string;
}>;
type RunPluginResponse = DispatchNodeResultType<{}>;
export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPluginResponse> => {
const {
mode,
teamId,
tmbId,
module,
params: { pluginId, ...data }
} = props;
@@ -59,45 +54,46 @@ export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPlugi
return params;
})();
const { responseData, moduleDispatchBills, answerText } = await dispatchModules({
const { flowResponses, flowUsages, assistantResponses } = await dispatchWorkFlow({
...props,
modules: plugin.modules.map((module) => ({
modules: setEntryEntries(plugin.modules).map((module) => ({
...module,
showStatus: false
})),
runtimeModules: undefined, // must reset
startParams
});
const output = responseData.find((item) => item.moduleType === FlowNodeTypeEnum.pluginOutput);
const output = flowResponses.find((item) => item.moduleType === FlowNodeTypeEnum.pluginOutput);
if (output) {
output.moduleLogo = plugin.avatar;
}
return {
answerText,
assistantResponses,
// responseData, // debug
[ModuleOutputKeyEnum.responseData]: {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
moduleLogo: plugin.avatar,
totalPoints: responseData.reduce((sum, item) => sum + (item.totalPoints || 0), 0),
runningTime: responseData.reduce((sum, item) => sum + (item.runningTime || 0), 0),
totalPoints: flowResponses.reduce((sum, item) => sum + (item.totalPoints || 0), 0),
pluginOutput: output?.pluginOutput,
pluginDetail:
mode === 'test' && plugin.teamId === teamId
? responseData.filter((item) => {
? flowResponses.filter((item) => {
const filterArr = [FlowNodeTypeEnum.pluginOutput];
return !filterArr.includes(item.moduleType as any);
})
: undefined
},
[ModuleOutputKeyEnum.moduleDispatchBills]: [
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: plugin.name,
totalPoints: moduleDispatchBills.reduce((sum, item) => sum + (item.totalPoints || 0), 0),
totalPoints: flowUsages.reduce((sum, item) => sum + (item.totalPoints || 0), 0),
model: plugin.name,
tokens: 0
}
],
[DispatchNodeResponseKeyEnum.toolResponses]: output?.pluginOutput ? output.pluginOutput : {},
...(output ? output.pluginOutput : {})
};
};
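The plugin node runs an entire sub-workflow and folds the per-node results into one nodeResponse entry: points are summed across all nodes, and the pluginOutput node supplies the values surfaced to the parent workflow. A hedged sketch of that roll-up under simplified stand-in types (assumed, the real ChatHistoryItemResType is much wider):
type SketchNodeRes = {
  moduleType: string;
  totalPoints?: number;
  pluginOutput?: Record<string, any>;
};
// summarise a sub-workflow run, mirroring how dispatchRunPlugin
// aggregates flowResponses into a single parent entry
const summarisePluginRun = (flowResponses: SketchNodeRes[], pluginLogo: string) => {
  const output = flowResponses.find((r) => r.moduleType === 'pluginOutput');
  return {
    moduleLogo: pluginLogo,
    totalPoints: flowResponses.reduce((sum, r) => sum + (r.totalPoints || 0), 0),
    pluginOutput: output?.pluginOutput
  };
};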

View File

@@ -1,19 +1,17 @@
import type { moduleDispatchResType } from '@fastgpt/global/core/chat/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type.d';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
export type PluginOutputProps = ModuleDispatchProps<{
[key: string]: any;
}>;
export type PluginOutputResponse = {
[ModuleOutputKeyEnum.responseData]: moduleDispatchResType;
};
export type PluginOutputResponse = DispatchNodeResultType<{}>;
export const dispatchPluginOutput = (props: PluginOutputProps): PluginOutputResponse => {
const { params } = props;
return {
responseData: {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: 0,
pluginOutput: params
}

View File

@@ -1,6 +1,6 @@
import { sseResponseEventEnum } from '@fastgpt/service/common/response/constant';
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { responseWrite } from '@fastgpt/service/common/response';
import { textAdaptGptResponse } from '@/utils/adapt';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
export type AnswerProps = ModuleDispatchProps<{
@@ -23,7 +23,7 @@ export const dispatchAnswer = (props: Record<string, any>): AnswerResponse => {
if (stream) {
responseWrite({
res,
event: detail ? sseResponseEventEnum.response : undefined,
event: detail ? SseResponseEventEnum.fastAnswer : undefined,
data: textAdaptGptResponse({
text: `\n${formatText}`
})

View File

@@ -1,16 +1,14 @@
import type { moduleDispatchResType } from '@fastgpt/global/core/chat/type.d';
import type {
ModuleDispatchProps,
ModuleDispatchResponse
} from '@fastgpt/global/core/module/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import {
DYNAMIC_INPUT_KEY,
ModuleInputKeyEnum,
ModuleOutputKeyEnum
} from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import axios from 'axios';
import { valueTypeFormat } from '../utils';
import { SERVICE_LOCAL_HOST } from '@fastgpt/service/common/system/tools';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
type HttpRequestProps = ModuleDispatchProps<{
[ModuleInputKeyEnum.abandon_httpUrl]: string;
@@ -19,7 +17,7 @@ type HttpRequestProps = ModuleDispatchProps<{
[ModuleInputKeyEnum.httpHeaders]: string;
[key: string]: any;
}>;
type HttpResponse = ModuleDispatchResponse<{
type HttpResponse = DispatchNodeResultType<{
[ModuleOutputKeyEnum.failed]?: boolean;
[key: string]: any;
}>;
@@ -99,7 +97,7 @@ export const dispatchHttpRequest = async (props: HttpRequestProps): Promise<Http
}
return {
[ModuleOutputKeyEnum.responseData]: {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: 0,
body: formatBody,
httpResult: response
@@ -111,7 +109,7 @@ export const dispatchHttpRequest = async (props: HttpRequestProps): Promise<Http
return {
[ModuleOutputKeyEnum.failed]: true,
[ModuleOutputKeyEnum.responseData]: {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: 0,
body: formatBody,
httpResult: { error }

View File

@@ -1,16 +1,15 @@
import type {
ModuleDispatchProps,
ModuleDispatchResponse
} from '@fastgpt/global/core/module/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import {
DYNAMIC_INPUT_KEY,
ModuleInputKeyEnum,
ModuleOutputKeyEnum
} from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import axios from 'axios';
import { valueTypeFormat } from '../utils';
import { SERVICE_LOCAL_HOST } from '@fastgpt/service/common/system/tools';
import { addLog } from '@fastgpt/service/common/system/log';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
type PropsArrType = {
key: string;
@@ -27,7 +26,7 @@ type HttpRequestProps = ModuleDispatchProps<{
[DYNAMIC_INPUT_KEY]: Record<string, any>;
[key: string]: any;
}>;
type HttpResponse = ModuleDispatchResponse<{
type HttpResponse = DispatchNodeResultType<{
[ModuleOutputKeyEnum.failed]?: boolean;
[key: string]: any;
}>;
@@ -40,7 +39,7 @@ export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<H
chatId,
responseChatItemId,
variables,
module: { outputs },
module: { moduleId, outputs },
histories,
params: {
system_httpMethod: httpMethod = 'POST',
@@ -119,20 +118,22 @@ export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<H
}
return {
[ModuleOutputKeyEnum.responseData]: {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: 0,
params: Object.keys(params).length > 0 ? params : undefined,
body: Object.keys(requestBody).length > 0 ? requestBody : undefined,
headers: Object.keys(headers).length > 0 ? headers : undefined,
httpResult: rawResponse
},
[DispatchNodeResponseKeyEnum.toolResponses]: results,
[ModuleOutputKeyEnum.httpRawResponse]: rawResponse,
...results
};
} catch (error) {
addLog.error('Http request error', error);
return {
[ModuleOutputKeyEnum.failed]: true,
[ModuleOutputKeyEnum.responseData]: {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: 0,
params: Object.keys(params).length > 0 ? params : undefined,
body: Object.keys(requestBody).length > 0 ? requestBody : undefined,

View File

@@ -1,14 +1,13 @@
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type {
ModuleDispatchProps,
ModuleDispatchResponse
} from '@fastgpt/global/core/module/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { ModelTypeEnum, getLLMModel } from '@fastgpt/service/core/ai/model';
import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/utils';
import { queryExtension } from '@fastgpt/service/core/ai/functions/queryExtension';
import { getHistories } from '../utils';
import { hashStr } from '@fastgpt/global/common/string/tools';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
type Props = ModuleDispatchProps<{
[ModuleInputKeyEnum.aiModel]: string;
@@ -16,7 +15,7 @@ type Props = ModuleDispatchProps<{
[ModuleInputKeyEnum.history]?: ChatItemType[] | number;
[ModuleInputKeyEnum.userChatInput]: string;
}>;
type Response = ModuleDispatchResponse<{
type Response = DispatchNodeResultType<{
[ModuleOutputKeyEnum.text]: string;
}>;
@@ -57,14 +56,14 @@ export const dispatchQueryExtension = async ({
});
return {
[ModuleOutputKeyEnum.responseData]: {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints,
model: modelName,
tokens,
query: userChatInput,
textOutput: JSON.stringify(filterSameQueries)
},
[ModuleOutputKeyEnum.moduleDispatchBills]: [
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: module.name,
totalPoints,

View File

@@ -1,24 +1,24 @@
import type { moduleDispatchResType, ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type {
ModuleDispatchProps,
ModuleDispatchResponse
} from '@fastgpt/global/core/module/type.d';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { SelectAppItemType } from '@fastgpt/global/core/module/type';
import { dispatchModules } from '../index';
import { dispatchWorkFlow } from '../index';
import { MongoApp } from '@fastgpt/service/core/app/schema';
import { responseWrite } from '@fastgpt/service/common/response';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { sseResponseEventEnum } from '@fastgpt/service/common/response/constant';
import { textAdaptGptResponse } from '@/utils/adapt';
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { getHistories } from '../utils';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { getHistories, setEntryEntries } from '../utils';
import { chatValue2RuntimePrompt, runtimePrompt2ChatsValue } from '@fastgpt/global/core/chat/adapt';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
type Props = ModuleDispatchProps<{
[ModuleInputKeyEnum.userChatInput]: string;
[ModuleInputKeyEnum.history]?: ChatItemType[] | number;
app: SelectAppItemType;
}>;
type Response = ModuleDispatchResponse<{
type Response = DispatchNodeResultType<{
[ModuleOutputKeyEnum.answerText]: string;
[ModuleOutputKeyEnum.history]: ChatItemType[];
}>;
@@ -30,6 +30,7 @@ export const dispatchAppRequest = async (props: Props): Promise<Response> => {
stream,
detail,
histories,
inputFiles,
params: { userChatInput, history, app }
} = props;
let start = Date.now();
@@ -50,7 +51,7 @@ export const dispatchAppRequest = async (props: Props): Promise<Response> => {
if (stream) {
responseWrite({
res,
event: detail ? sseResponseEventEnum.answer : undefined,
event: detail ? SseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({
text: '\n'
})
@@ -59,11 +60,13 @@ export const dispatchAppRequest = async (props: Props): Promise<Response> => {
const chatHistories = getHistories(history, histories);
const { responseData, moduleDispatchBills, answerText } = await dispatchModules({
const { flowResponses, flowUsages, assistantResponses } = await dispatchWorkFlow({
...props,
appId: app.id,
modules: appData.modules,
modules: setEntryEntries(appData.modules),
runtimeModules: undefined, // must reset
histories: chatHistories,
inputFiles,
startParams: {
userChatInput
}
@@ -72,28 +75,33 @@ export const dispatchAppRequest = async (props: Props): Promise<Response> => {
const completeMessages = chatHistories.concat([
{
obj: ChatRoleEnum.Human,
value: userChatInput
value: runtimePrompt2ChatsValue({
files: inputFiles,
text: userChatInput
})
},
{
obj: ChatRoleEnum.AI,
value: answerText
value: assistantResponses
}
]);
const { text } = chatValue2RuntimePrompt(assistantResponses);
return {
[ModuleOutputKeyEnum.responseData]: {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
moduleLogo: appData.avatar,
query: userChatInput,
textOutput: answerText,
totalPoints: responseData.reduce((sum, item) => sum + (item.totalPoints || 0), 0)
textOutput: text,
totalPoints: flowResponses.reduce((sum, item) => sum + (item.totalPoints || 0), 0)
},
[ModuleOutputKeyEnum.moduleDispatchBills]: [
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: appData.name,
totalPoints: responseData.reduce((sum, item) => sum + (item.totalPoints || 0), 0)
totalPoints: flowUsages.reduce((sum, item) => sum + (item.totalPoints || 0), 0)
}
],
answerText: answerText,
answerText: text,
history: completeMessages
};
};
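The app node now stores structured chat values rather than a plain string: the user turn is packed with runtimePrompt2ChatsValue and the assistant text is recovered with chatValue2RuntimePrompt. A sketch of that round-trip under assumed minimal shapes (the real helpers in @fastgpt/global/core/chat/adapt handle more value kinds; packRuntimePrompt and unpackRuntimePrompt below are illustrative stand-ins, not the library functions):
type SketchChatValue =
  | { type: 'text'; text: { content: string } }
  | { type: 'file'; file: { url: string } };
// assumed shape: pack the user's files and text into structured values
const packRuntimePrompt = (p: { files: { url: string }[]; text: string }): SketchChatValue[] => [
  ...p.files.map((file) => ({ type: 'file' as const, file })),
  { type: 'text' as const, text: { content: p.text } }
];
// assumed shape: recover plain text by concatenating the text values
const unpackRuntimePrompt = (values: SketchChatValue[]): { text: string } => ({
  text: values.map((v) => (v.type === 'text' ? v.text.content : '')).join('')
});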

View File

@@ -0,0 +1,16 @@
import {
AIChatItemValueItemType,
ChatHistoryItemResType,
ChatItemValueItemType,
ToolRunResponseItemType
} from '@fastgpt/global/core/chat/type';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
export type DispatchFlowResponse = {
flowResponses: ChatHistoryItemResType[];
flowUsages: ChatNodeUsageType[];
// [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: ChatNodeUsageType[];
[DispatchNodeResponseKeyEnum.toolResponses]: ToolRunResponseItemType[];
[DispatchNodeResponseKeyEnum.assistantResponses]: AIChatItemValueItemType[];
};
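A hypothetical consumer of this shape, showing what each field is for (sketch only, the caller name and import path are assumed):
import type { DispatchFlowResponse } from './type';
// sum the usage records and count the responded nodes after a run
const summariseRun = (flow: DispatchFlowResponse) => ({
  totalPoints: flow.flowUsages.reduce((sum, u) => sum + (u.totalPoints || 0), 0),
  respondedNodes: flow.flowResponses.length,
  assistantValues: flow.assistantResponses.length
});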

View File

@@ -1,5 +1,43 @@
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import { DYNAMIC_INPUT_KEY, ModuleIOValueTypeEnum } from '@fastgpt/global/core/module/constants';
import { ModuleIOValueTypeEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/module/node/constant';
import { ModuleItemType } from '@fastgpt/global/core/module/type.d';
export const setEntryEntries = (modules: ModuleItemType[]) => {
const initRunningModuleType: Record<string, boolean> = {
[FlowNodeTypeEnum.historyNode]: true,
[FlowNodeTypeEnum.questionInput]: true,
[FlowNodeTypeEnum.pluginInput]: true
};
modules.forEach((item) => {
if (initRunningModuleType[item.flowType]) {
item.isEntry = true;
}
});
return modules;
};
export const checkTheModuleConnectedByTool = (
modules: ModuleItemType[],
module: ModuleItemType
) => {
let sign = false;
const toolModules = modules.filter((item) => item.flowType === FlowNodeTypeEnum.tools);
toolModules.forEach((item) => {
const toolOutput = item.outputs.find(
(output) => output.key === ModuleOutputKeyEnum.selectedTools
);
toolOutput?.targets.forEach((target) => {
if (target.moduleId === module.moduleId) {
sign = true;
}
});
});
return sign;
};
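setEntryEntries marks the fixed entry node types as entries, and checkTheModuleConnectedByTool reports whether any tools node's selectedTools output targets the given module. An equivalent standalone predicate under simplified stand-in types (assumed, only the fields used here are modelled):
type SketchTarget = { moduleId: string };
type SketchModule = {
  moduleId: string;
  flowType: string;
  isEntry?: boolean;
  outputs: { key: string; targets: SketchTarget[] }[];
};
// true when any tools node's selectedTools output targets this module
const isConnectedByTool = (modules: SketchModule[], module: SketchModule): boolean =>
  modules
    .filter((m) => m.flowType === 'tools')
    .some((m) =>
      m.outputs
        .find((o) => o.key === 'selectedTools')
        ?.targets.some((t) => t.moduleId === module.moduleId)
    );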
export const getHistories = (history?: ChatItemType[] | number, histories: ChatItemType[] = []) => {
if (!history) return [];

View File

@@ -3,7 +3,7 @@ import { ModelTypeEnum } from '@fastgpt/service/core/ai/model';
import { addLog } from '@fastgpt/service/common/system/log';
import { createUsage, concatUsage } from './controller';
import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/utils';
import { ChatModuleUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
export const pushChatUsage = ({
appName,
@@ -11,16 +11,16 @@ export const pushChatUsage = ({
teamId,
tmbId,
source,
moduleDispatchBills
flowUsages
}: {
appName: string;
appId: string;
teamId: string;
tmbId: string;
source: `${UsageSourceEnum}`;
moduleDispatchBills: ChatModuleUsageType[];
flowUsages: ChatNodeUsageType[];
}) => {
const totalPoints = moduleDispatchBills.reduce((sum, item) => sum + (item.totalPoints || 0), 0);
const totalPoints = flowUsages.reduce((sum, item) => sum + (item.totalPoints || 0), 0);
createUsage({
teamId,
@@ -29,7 +29,7 @@ export const pushChatUsage = ({
appId,
totalPoints,
source,
list: moduleDispatchBills.map((item) => ({
list: flowUsages.map((item) => ({
moduleName: item.moduleName,
amount: item.totalPoints || 0,
model: item.model,

View File

@@ -1,10 +1,14 @@
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type {
AIChatItemType,
ChatItemType,
UserChatItemType
} from '@fastgpt/global/core/chat/type.d';
import { MongoApp } from '@fastgpt/service/core/app/schema';
import { ChatSourceEnum } from '@fastgpt/global/core/chat/constants';
import { MongoChatItem } from '@fastgpt/service/core/chat/chatItemSchema';
import { MongoChat } from '@fastgpt/service/core/chat/chatSchema';
import { addLog } from '@fastgpt/service/common/system/log';
import { chatContentReplaceBlock } from '@fastgpt/global/core/chat/utils';
import { getChatTitleFromChatMessage } from '@fastgpt/global/core/chat/utils';
import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun';
type Props = {
@@ -17,7 +21,7 @@ type Props = {
source: `${ChatSourceEnum}`;
shareId?: string;
outLinkUid?: string;
content: [ChatItemType, ChatItemType];
content: [UserChatItemType & { dataId?: string }, AIChatItemType & { dataId?: string }];
metadata?: Record<string, any>;
};
@@ -47,10 +51,7 @@ export async function saveChat({
...chat?.metadata,
...metadata
};
const title =
chatContentReplaceBlock(content[0].value).slice(0, 20) ||
content[1]?.value?.slice(0, 20) ||
'Chat';
const title = getChatTitleFromChatMessage(content[0]);
await mongoSessionRun(async (session) => {
await MongoChatItem.insertMany(