v4.6.9-alpha (#918)

Co-authored-by: Mufei <327958099@qq.com>
Co-authored-by: heheer <71265218+newfish-cmyk@users.noreply.github.com>
This commit is contained in:
Archer
2024-03-04 00:05:25 +08:00
committed by GitHub
parent f9f0b4bffd
commit 42a8184ea0
153 changed files with 4906 additions and 4307 deletions

View File

@@ -1,6 +1,7 @@
import { adaptChat2GptMessages } from '@fastgpt/global/core/chat/adapt';
import { ChatContextFilter, countMessagesChars } from '@fastgpt/service/core/chat/utils';
import type { moduleDispatchResType, ChatItemType } from '@fastgpt/global/core/chat/type.d';
import { ChatContextFilter } from '@fastgpt/service/core/chat/utils';
import { countMessagesTokens } from '@fastgpt/global/common/string/tiktoken';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { getAIApi } from '@fastgpt/service/core/ai/config';
import type {
@@ -14,7 +15,7 @@ import { Prompt_CQJson } from '@/global/core/prompt/agent';
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { ModelTypeEnum, getLLMModel } from '@fastgpt/service/core/ai/model';
import { getHistories } from '../utils';
import { formatModelChars2Points } from '@/service/support/wallet/usage/utils';
import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/utils';
type Props = ModuleDispatchProps<{
[ModuleInputKeyEnum.aiModel]: string;
@@ -46,7 +47,7 @@ export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse
const chatHistories = getHistories(history, histories);
const { arg, charsLength } = await (async () => {
const { arg, tokens } = await (async () => {
if (cqModel.toolChoice) {
return toolChoice({
...props,
@@ -65,7 +66,7 @@ export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse
const { totalPoints, modelName } = formatModelChars2Points({
model: cqModel.model,
charsLength,
tokens,
modelType: ModelTypeEnum.llm
});
@@ -75,7 +76,7 @@ export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
query: userChatInput,
charsLength,
tokens,
cqList: agents,
cqResult: result.value,
contextTotalLen: chatHistories.length + 2
@@ -85,7 +86,7 @@ export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse
moduleName: name,
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
charsLength
tokens
}
]
};
@@ -136,6 +137,13 @@ ${systemPrompt}
required: ['type']
}
};
const tools: any = [
{
type: 'function',
function: agentFunction
}
];
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
@@ -144,13 +152,8 @@ ${systemPrompt}
const response = await ai.chat.completions.create({
model: cqModel.model,
temperature: 0,
messages: [...adaptMessages],
tools: [
{
type: 'function',
function: agentFunction
}
],
messages: adaptMessages,
tools,
tool_choice: { type: 'function', function: { name: agentFunName } }
});
@@ -158,13 +161,10 @@ ${systemPrompt}
const arg = JSON.parse(
response?.choices?.[0]?.message?.tool_calls?.[0]?.function?.arguments || ''
);
const functionChars =
agentFunction.description.length +
agentFunction.parameters.properties.type.description.length;
return {
arg,
charsLength: countMessagesChars(messages) + functionChars
tokens: countMessagesTokens(messages, tools)
};
} catch (error) {
console.log(agentFunction.parameters);
@@ -174,7 +174,7 @@ ${systemPrompt}
return {
arg: {},
charsLength: 0
tokens: 0
};
}
}
@@ -216,7 +216,7 @@ async function completions({
agents.find((item) => answer.includes(item.key) || answer.includes(item.value))?.key || '';
return {
charsLength: countMessagesChars(messages),
tokens: countMessagesTokens(messages),
arg: { type: id }
};
}

View File

@@ -1,6 +1,7 @@
import { adaptChat2GptMessages } from '@fastgpt/global/core/chat/adapt';
import { ChatContextFilter, countMessagesChars } from '@fastgpt/service/core/chat/utils';
import type { moduleDispatchResType, ChatItemType } from '@fastgpt/global/core/chat/type.d';
import { ChatContextFilter } from '@fastgpt/service/core/chat/utils';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import { countMessagesTokens } from '@fastgpt/global/common/string/tiktoken';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { getAIApi } from '@fastgpt/service/core/ai/config';
import type {
@@ -14,7 +15,7 @@ import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getHistories } from '../utils';
import { ModelTypeEnum, getLLMModel } from '@fastgpt/service/core/ai/model';
import { formatModelChars2Points } from '@/service/support/wallet/usage/utils';
import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/utils';
type Props = ModuleDispatchProps<{
[ModuleInputKeyEnum.history]?: ChatItemType[];
@@ -46,7 +47,7 @@ export async function dispatchContentExtract(props: Props): Promise<Response> {
const extractModel = getLLMModel(model);
const chatHistories = getHistories(history, histories);
const { arg, charsLength } = await (async () => {
const { arg, tokens } = await (async () => {
if (extractModel.toolChoice) {
return toolChoice({
...props,
@@ -85,7 +86,7 @@ export async function dispatchContentExtract(props: Props): Promise<Response> {
const { totalPoints, modelName } = formatModelChars2Points({
model: extractModel.model,
charsLength,
tokens,
modelType: ModelTypeEnum.llm
});
@@ -98,7 +99,7 @@ export async function dispatchContentExtract(props: Props): Promise<Response> {
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
query: content,
charsLength,
tokens,
extractDescription: description,
extractResult: arg,
contextTotalLen: chatHistories.length + 2
@@ -108,7 +109,7 @@ export async function dispatchContentExtract(props: Props): Promise<Response> {
moduleName: name,
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
charsLength
tokens
}
]
};
@@ -170,6 +171,12 @@ ${description || '根据用户要求获取适当的 JSON 字符串。'}
required: extractKeys.filter((item) => item.required).map((item) => item.key)
}
};
const tools: any = [
{
type: 'function',
function: agentFunction
}
];
const ai = getAIApi({
userKey: user.openaiAccount,
@@ -180,12 +187,7 @@ ${description || '根据用户要求获取适当的 JSON 字符串。'}
model: extractModel.model,
temperature: 0,
messages: [...adaptMessages],
tools: [
{
type: 'function',
function: agentFunction
}
],
tools,
tool_choice: { type: 'function', function: { name: agentFunName } }
});
@@ -202,12 +204,9 @@ ${description || '根据用户要求获取适当的 JSON 字符串。'}
}
})();
const functionChars =
description.length + extractKeys.reduce((sum, item) => sum + item.desc.length, 0);
return {
rawResponse: response?.choices?.[0]?.message?.tool_calls?.[0]?.function?.arguments || '',
charsLength: countMessagesChars(messages) + functionChars,
tokens: countMessagesTokens(messages, tools),
arg
};
}
@@ -257,7 +256,7 @@ Human: ${content}`
if (start === -1 || end === -1)
return {
rawResponse: answer,
charsLength: countMessagesChars(messages),
tokens: countMessagesTokens(messages),
arg: {}
};
@@ -269,14 +268,14 @@ Human: ${content}`
try {
return {
rawResponse: answer,
charsLength: countMessagesChars(messages),
tokens: countMessagesTokens(messages),
arg: JSON.parse(jsonStr) as Record<string, any>
};
} catch (error) {
return {
rawResponse: answer,
charsLength: countMessagesChars(messages),
tokens: countMessagesTokens(messages),
arg: {}
};
}

View File

@@ -1,17 +1,17 @@
import type { NextApiResponse } from 'next';
import { ChatContextFilter, countMessagesChars } from '@fastgpt/service/core/chat/utils';
import type { moduleDispatchResType, ChatItemType } from '@fastgpt/global/core/chat/type.d';
import { ChatContextFilter } from '@fastgpt/service/core/chat/utils';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { sseResponseEventEnum } from '@fastgpt/service/common/response/constant';
import { textAdaptGptResponse } from '@/utils/adapt';
import { getAIApi } from '@fastgpt/service/core/ai/config';
import type { ChatCompletion, StreamChatType } from '@fastgpt/global/core/ai/type.d';
import { formatModelChars2Points } from '@/service/support/wallet/usage/utils';
import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/utils';
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { postTextCensor } from '@/service/common/censor';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constant';
import type { ModuleDispatchResponse, ModuleItemType } from '@fastgpt/global/core/module/type.d';
import { countMessagesTokens, sliceMessagesTB } from '@fastgpt/global/common/string/tiktoken';
import { countMessagesTokens } from '@fastgpt/global/common/string/tiktoken';
import { adaptChat2GptMessages } from '@fastgpt/global/core/chat/adapt';
import { Prompt_QuotePromptList, Prompt_QuoteTemplateList } from '@/global/core/prompt/AIChat';
import type { AIChatModuleProps } from '@fastgpt/global/core/module/node/type.d';
@@ -98,7 +98,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
userChatInput,
systemPrompt
});
const { max_tokens } = getMaxTokens({
const { max_tokens } = await getMaxTokens({
model: modelConstantsData,
maxToken,
filterMessages
@@ -137,8 +137,6 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
const response = await ai.chat.completions.create(
{
presence_penalty: 0,
frequency_penalty: 0,
...modelConstantsData?.defaultConfig,
model: modelConstantsData.model,
temperature,
@@ -189,10 +187,10 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
}
})();
const charsLength = countMessagesChars(completeMessages);
const tokens = countMessagesTokens(completeMessages);
const { totalPoints, modelName } = formatModelChars2Points({
model,
charsLength,
tokens,
modelType: ModelTypeEnum.llm
});
@@ -201,7 +199,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
[ModuleOutputKeyEnum.responseData]: {
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
charsLength,
tokens,
query: `${userChatInput}`,
maxToken: max_tokens,
quoteList: filterQuoteQA,
@@ -213,7 +211,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
moduleName: name,
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
charsLength
tokens
}
],
history: completeMessages
@@ -292,7 +290,7 @@ function getChatMessages({
const filterMessages = ChatContextFilter({
messages,
maxTokens: Math.ceil(model.maxContext - 300) // filter token. not response maxToken
maxTokens: model.maxContext - 300 // filter token. not response maxToken
});
const adaptMessages = adaptChat2GptMessages({ messages: filterMessages, reserveId: false });
@@ -315,11 +313,12 @@ function getMaxTokens({
const tokensLimit = model.maxContext;
/* count response max token */
const promptsToken = countMessagesTokens({
messages: filterMessages
});
const promptsToken = countMessagesTokens(filterMessages);
maxToken = promptsToken + maxToken > tokensLimit ? tokensLimit - promptsToken : maxToken;
if (maxToken <= 0) {
return Promise.reject('Over max token');
}
return {
max_tokens: maxToken
};

View File

@@ -1,5 +1,5 @@
import type { moduleDispatchResType } from '@fastgpt/global/core/chat/type.d';
import { formatModelChars2Points } from '@/service/support/wallet/usage/utils';
import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/utils';
import type { SelectedDatasetType } from '@fastgpt/global/core/module/api.d';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import type {
@@ -12,7 +12,7 @@ import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/mo
import { DatasetSearchModeEnum } from '@fastgpt/global/core/dataset/constants';
import { getHistories } from '../utils';
import { datasetSearchQueryExtension } from '@fastgpt/service/core/dataset/search/utils';
import { ChatModuleBillType } from '@fastgpt/global/support/wallet/bill/type';
import { ChatModuleUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { checkTeamReRankPermission } from '@fastgpt/service/support/permission/teamLimit';
type DatasetSearchProps = ModuleDispatchProps<{
@@ -85,7 +85,7 @@ export async function dispatchDatasetSearch(
// start search
const {
searchRes,
charsLength,
tokens,
usingSimilarityFilter,
usingReRank: searchUsingReRank
} = await searchDatasetData({
@@ -104,37 +104,37 @@ export async function dispatchDatasetSearch(
// vector
const { totalPoints, modelName } = formatModelChars2Points({
model: vectorModel.model,
charsLength,
tokens,
modelType: ModelTypeEnum.vector
});
const responseData: moduleDispatchResType & { totalPoints: number } = {
totalPoints,
query: concatQueries.join('\n'),
model: modelName,
charsLength,
tokens,
similarity: usingSimilarityFilter ? similarity : undefined,
limit,
searchMode,
searchUsingReRank: searchUsingReRank
};
const moduleDispatchBills: ChatModuleBillType[] = [
const moduleDispatchBills: ChatModuleUsageType[] = [
{
totalPoints,
moduleName: module.name,
model: modelName,
charsLength
tokens
}
];
if (aiExtensionResult) {
const { totalPoints, modelName } = formatModelChars2Points({
model: aiExtensionResult.model,
charsLength: aiExtensionResult.charsLength,
tokens: aiExtensionResult.tokens,
modelType: ModelTypeEnum.llm
});
responseData.totalPoints += totalPoints;
responseData.charsLength = aiExtensionResult.charsLength;
responseData.tokens = aiExtensionResult.tokens;
responseData.extensionModel = modelName;
responseData.extensionResult =
aiExtensionResult.extensionQueries?.join('\n') ||
@@ -144,7 +144,7 @@ export async function dispatchDatasetSearch(
totalPoints,
moduleName: 'core.module.template.Query extension',
model: modelName,
charsLength: aiExtensionResult.charsLength
tokens: aiExtensionResult.tokens
});
}

View File

@@ -28,7 +28,7 @@ import { dispatchRunPlugin } from './plugin/run';
import { dispatchPluginInput } from './plugin/runInput';
import { dispatchPluginOutput } from './plugin/runOutput';
import { valueTypeFormat } from './utils';
import { ChatModuleBillType } from '@fastgpt/global/support/wallet/bill/type';
import { ChatModuleUsageType } from '@fastgpt/global/support/wallet/bill/type';
const callbackMap: Record<`${FlowNodeTypeEnum}`, Function> = {
[FlowNodeTypeEnum.historyNode]: dispatchHistory,
@@ -83,7 +83,7 @@ export async function dispatchModules({
// let storeData: Record<string, any> = {}; // after module used
let chatResponse: ChatHistoryItemResType[] = []; // response request and save to database
let chatAnswerText = ''; // AI answer
let chatModuleBills: ChatModuleBillType[] = [];
let chatModuleBills: ChatModuleUsageType[] = [];
let runningTime = Date.now();
function pushStore(
@@ -95,7 +95,7 @@ export async function dispatchModules({
}: {
answerText?: string;
responseData?: ChatHistoryItemResType | ChatHistoryItemResType[];
moduleDispatchBills?: ChatModuleBillType[];
moduleDispatchBills?: ChatModuleUsageType[];
}
) {
const time = Date.now();
@@ -165,7 +165,6 @@ export async function dispatchModules({
const filterModules = nextRunModules.filter((module) => {
if (set.has(module.moduleId)) return false;
set.add(module.moduleId);
``;
return true;
});

View File

@@ -95,7 +95,7 @@ export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPlugi
moduleName: plugin.name,
totalPoints: moduleDispatchBills.reduce((sum, item) => sum + (item.totalPoints || 0), 0),
model: plugin.name,
charsLength: 0
tokens: 0
}
],
...(output ? output.pluginOutput : {})

View File

@@ -10,6 +10,7 @@ import {
import axios from 'axios';
import { valueTypeFormat } from '../utils';
import { SERVICE_LOCAL_HOST } from '@fastgpt/service/common/system/tools';
import { addLog } from '@fastgpt/service/common/system/log';
type PropsArrType = {
key: string;
@@ -130,7 +131,7 @@ export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<H
...results
};
} catch (error) {
const err = httpRequestErrorResponseData(error)
addLog.error('Http request error', error);
return {
[ModuleOutputKeyEnum.failed]: true,
[ModuleOutputKeyEnum.responseData]: {
@@ -138,7 +139,7 @@ export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<H
params: Object.keys(params).length > 0 ? params : undefined,
body: Object.keys(requestBody).length > 0 ? requestBody : undefined,
headers: Object.keys(headers).length > 0 ? headers : undefined,
httpResult: { error: err }
httpResult: { error: formatHttpError(error) }
}
};
}
@@ -280,21 +281,14 @@ function removeUndefinedSign(obj: Record<string, any>) {
}
return obj;
}
function httpRequestErrorResponseData(error: any) {
try {
return {
message: error?.message || undefined,
name: error?.name || undefined,
method: error?.config?.method || undefined,
baseURL: error?.config?.baseURL || undefined,
url: error?.config?.url || undefined,
code: error?.code || undefined,
status: error?.status || undefined
}
} catch (error) {
return {
message: 'Request Failed',
name: "AxiosError",
};
}
/**
 * Flatten an (Axios-style) error into a plain, JSON-serializable summary
 * for the http module's `httpResult.error` response field.
 *
 * Every access is optional-chained, so any value (including `undefined`)
 * is accepted; absent fields simply come back as `undefined`.
 */
function formatHttpError(error: any) {
  // Request metadata lives on the axios `config` object when present.
  const requestConfig = error?.config;

  return {
    message: error?.message,
    name: error?.name,
    method: requestConfig?.method,
    baseURL: requestConfig?.baseURL,
    url: requestConfig?.url,
    code: error?.code,
    status: error?.status
  };
}

View File

@@ -5,7 +5,7 @@ import type {
} from '@fastgpt/global/core/module/type.d';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { ModelTypeEnum, getLLMModel } from '@fastgpt/service/core/ai/model';
import { formatModelChars2Points } from '@/service/support/wallet/usage/utils';
import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/utils';
import { queryExtension } from '@fastgpt/service/core/ai/functions/queryExtension';
import { getHistories } from '../utils';
import { hashStr } from '@fastgpt/global/common/string/tools';
@@ -32,7 +32,7 @@ export const dispatchQueryExtension = async ({
const queryExtensionModel = getLLMModel(model);
const chatHistories = getHistories(history, histories);
const { extensionQueries, charsLength } = await queryExtension({
const { extensionQueries, tokens } = await queryExtension({
chatBg: systemPrompt,
query: userChatInput,
histories: chatHistories,
@@ -43,7 +43,7 @@ export const dispatchQueryExtension = async ({
const { totalPoints, modelName } = formatModelChars2Points({
model: queryExtensionModel.model,
charsLength,
tokens,
modelType: ModelTypeEnum.llm
});
@@ -60,7 +60,7 @@ export const dispatchQueryExtension = async ({
[ModuleOutputKeyEnum.responseData]: {
totalPoints,
model: modelName,
charsLength,
tokens,
query: userChatInput,
textOutput: JSON.stringify(filterSameQueries)
},
@@ -69,7 +69,7 @@ export const dispatchQueryExtension = async ({
moduleName: module.name,
totalPoints,
model: modelName,
charsLength
tokens
}
],
[ModuleOutputKeyEnum.text]: JSON.stringify(filterSameQueries)