4.7-alpha2 (#1027)

* feat: stop toolCall and rename some fields (#46)
* perf: node delete tip; pay tip
* fix: toolCall cannot save child answer
* feat: stop tool
* fix: team modal
* fix: feedbackModal auth bug (#47)
* feat: basic support for running tools from the prompt; optimize workflow templates (#49)
* remove templates
* fix: request body undefined
* feat: prompt tool run
* feat: workflow templates modal
* perf: plugin start
* 4.7 (#50)
* fix docker-compose download url (#994): the original URL returned 404 NOT FOUND; add 'v' before the docker-compose version
* Update ai_settings.md (#1000)
* Update configuration.md
* Update configuration.md
* Fix history in classifyQuestion and extract modules (#1012)
* Fix history in classifyQuestion and extract modules
* Add chatValue2RuntimePrompt import and update text formatting
* flow controller to packages
* fix: rerank select
* modal ui
* perf: modal code path
* point not sufficient
* feat: http url support variable
* fix: http key
* perf: prompt
* perf: ai setting modal
* simple edit ui

--------

Co-authored-by: entorick <entorick11@qq.com>
Co-authored-by: liujianglc <liujianglc@163.com>
Co-authored-by: Fengrui Liu <liufengrui.work@bytedance.com>

* fix: team share redirect to login (#51)
* feat: support openapi import plugins (#48)
* feat: support openapi import plugins
* feat: import from url
* fix: add body params parse
* fix build
* fix
* fix
* fix
* tool box ui (#52)
* fix: training queue
* feat: simple edit tool select
* perf: simple edit dataset prompt
* fix: chatbox tool ux
* feat: quote prompt module
* perf: plugin tools sign
* perf: model avatar
* tool selector ui
* feat: max histories
* perf: http plugin import (#53)
* perf: plugin http import
* chatBox ui
* perf: name
* fix: Node template card (#54)
* fix: ts
* setting modal
* package
* package
* feat: add plugins search (#57)
* feat: add plugins search
* perf: change http plugin header input
* Yjl (#56)
* perf: prompt tool call
* perf: chat box ux
* doc
* doc
* price tip
* perf: tool selector
* ui
* fix: vector queue
* fix: empty tool and empty response
* fix: empty msg
* perf: pg index
* perf: ui tip
* doc
* tool tip

--------

Co-authored-by: yst <77910600+yu-and-liu@users.noreply.github.com>
Co-authored-by: entorick <entorick11@qq.com>
Co-authored-by: liujianglc <liujianglc@163.com>
Co-authored-by: Fengrui Liu <liufengrui.work@bytedance.com>
Co-authored-by: heheer <71265218+newfish-cmyk@users.noreply.github.com>
@@ -1,15 +0,0 @@
import { POST } from '@fastgpt/service/common/api/plusRequest';

export const postTextCensor = (data: { text: string }) =>
  POST<{ code?: number; message: string }>('/common/censor/text_baidu', data)
    .then((res) => {
      if (res?.code === 5000) {
        return Promise.reject(res);
      }
    })
    .catch((err) => {
      if (err?.code === 5000) {
        return Promise.reject(err.message);
      }
      return Promise.resolve('');
    });
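A minimal usage sketch of the removed censor helper (the endpoint and the 5000 "blocked" code come from the code above; the calling function is hypothetical):

// Hypothetical caller: block user input that the text censor rejects.
async function isInputAllowed(text: string): Promise<boolean> {
  try {
    await postTextCensor({ text }); // resolves to '' when the censor passes or the request errors out
    return true;
  } catch (message) {
    console.log('Input rejected by censor:', message);
    return false;
  }
}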
@@ -1,13 +0,0 @@
import { cut } from '@node-rs/jieba';
import { stopWords } from '@fastgpt/global/common/string/jieba';

export function jiebaSplit({ text }: { text: string }) {
  const tokens = cut(text, true);

  return (
    tokens
      .map((item) => item.replace(/[\u3000-\u303f\uff00-\uffef]/g, '').trim())
      .filter((item) => item && !stopWords.has(item))
      .join(' ') || ''
  );
}
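A sketch of what the splitter produces (the sample input is illustrative); the space-separated output is what the MongoDB $text query later in this diff consumes:

// cut(text, true) segments CJK text; the regex strips CJK punctuation and
// full-width characters, and stop words are dropped.
jiebaSplit({ text: 'FastGPT 支持知识库搜索' });
// => e.g. 'FastGPT 支持 知识库 搜索', ready for { $text: { $search: ... } }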
@@ -30,7 +30,6 @@ export function initGlobal() {
  if (global.communityPlugins) return;

  global.communityPlugins = [];
  global.simpleModeTemplates = [];
  global.qaQueueLen = global.qaQueueLen ?? 0;
  global.vectorQueueLen = global.vectorQueueLen ?? 0;
  // init tikToken
@@ -1,34 +0,0 @@
import { PostReRankProps, PostReRankResponse } from '@fastgpt/global/core/ai/api';
import { POST } from '@/service/common/api/request';

export function reRankRecall({ query, inputs }: PostReRankProps) {
  const model = global.reRankModels[0];

  if (!model || !model?.requestUrl) {
    return Promise.reject('no rerank model');
  }

  let start = Date.now();
  return POST<PostReRankResponse>(
    model.requestUrl,
    {
      query,
      inputs
    },
    {
      headers: {
        Authorization: `Bearer ${model.requestAuth}`
      },
      timeout: 120000
    }
  )
    .then((data) => {
      console.log('rerank time:', Date.now() - start);
      return data;
    })
    .catch((err) => {
      console.log('rerank error:', err);

      return [];
    });
}
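A hedged call sketch (the inputs shape follows the usage above; ids and texts are made up). Note that a missing model rejects, while a failed HTTP call resolves to an empty array, so callers must treat [] as "rerank unavailable":

const ranked = await reRankRecall({
  query: 'how to reset my password',
  inputs: [
    { id: 'a', text: 'Password reset guide' },
    { id: 'b', text: 'Billing FAQ' }
  ]
});
// ranked: PostReRankResponse on success; [] when the request fails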
@@ -1,13 +0,0 @@
import { AppSimpleEditConfigTemplateType } from '@fastgpt/global/core/app/type';
import { GET } from '@fastgpt/service/common/api/plusRequest';
import { FastGPTProUrl } from '@fastgpt/service/common/system/constants';

export async function getSimpleTemplatesFromPlus(): Promise<AppSimpleEditConfigTemplateType[]> {
  try {
    if (!FastGPTProUrl) return [];

    return GET<AppSimpleEditConfigTemplateType[]>('/core/app/getSimpleTemplates');
  } catch (error) {
    return [];
  }
}
@@ -4,53 +4,14 @@ import {
  PatchIndexesProps,
  UpdateDatasetDataProps
} from '@fastgpt/global/core/dataset/controller';
import {
  insertDatasetDataVector,
  recallFromVectorStore
} from '@fastgpt/service/common/vectorStore/controller';
import {
  DatasetSearchModeEnum,
  DatasetSearchModeMap,
  SearchScoreTypeEnum
} from '@fastgpt/global/core/dataset/constants';
import { datasetSearchResultConcat } from '@fastgpt/global/core/dataset/search/utils';
import { insertDatasetDataVector } from '@fastgpt/service/common/vectorStore/controller';
import { getDefaultIndex } from '@fastgpt/global/core/dataset/utils';
import { jiebaSplit } from '@/service/common/string/jieba';
import { jiebaSplit } from '@fastgpt/service/common/string/jieba';
import { deleteDatasetDataVector } from '@fastgpt/service/common/vectorStore/controller';
import { getVectorsByText } from '@fastgpt/service/core/ai/embedding';
import { MongoDatasetCollection } from '@fastgpt/service/core/dataset/collection/schema';
import {
  DatasetDataItemType,
  DatasetDataSchemaType,
  DatasetDataWithCollectionType,
  SearchDataResponseItemType
} from '@fastgpt/global/core/dataset/type';
import { reRankRecall } from '../../ai/rerank';
import { countPromptTokens } from '@fastgpt/global/common/string/tiktoken';
import { hashStr } from '@fastgpt/global/common/string/tools';
import type {
  PushDatasetDataProps,
  PushDatasetDataResponse
} from '@fastgpt/global/core/dataset/api.d';
import { pushDataListToTrainingQueue } from '@fastgpt/service/core/dataset/training/controller';
import { DatasetDataItemType } from '@fastgpt/global/core/dataset/type';
import { getVectorModel } from '@fastgpt/service/core/ai/model';
import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun';

export async function pushDataToTrainingQueue(
  props: {
    teamId: string;
    tmbId: string;
  } & PushDatasetDataProps
): Promise<PushDatasetDataResponse> {
  const result = await pushDataListToTrainingQueue({
    ...props,
    vectorModelList: global.vectorModels,
    datasetModelList: global.llmModels
  });

  return result;
}

/* insert data.
 * 1. create data id
 * 2. insert pg
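A hedged call sketch of the wrapper above (the exact PushDatasetDataProps fields are not shown in this diff, so the data shape below is illustrative):

const { insertLen } = await pushDataToTrainingQueue({
  teamId,
  tmbId,
  collectionId, // assumed to be part of PushDatasetDataProps
  data: [{ q: 'What is FastGPT?', a: 'An LLM application platform.' }]
});
// vectorModelList / datasetModelList are injected from globals by the wrapper.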
@@ -283,390 +244,3 @@ export const deleteDatasetData = async (data: DatasetDataItemType) => {
    });
  });
};

type SearchDatasetDataProps = {
  teamId: string;
  model: string;
  similarity?: number; // min distance
  limit: number; // max Token limit
  datasetIds: string[];
  searchMode?: `${DatasetSearchModeEnum}`;
  usingReRank?: boolean;
  reRankQuery: string;
  queries: string[];
};

export async function searchDatasetData(props: SearchDatasetDataProps) {
  let {
    teamId,
    reRankQuery,
    queries,
    model,
    similarity = 0,
    limit: maxTokens,
    searchMode = DatasetSearchModeEnum.embedding,
    usingReRank = false,
    datasetIds = []
  } = props;

  /* init params */
  searchMode = DatasetSearchModeMap[searchMode] ? searchMode : DatasetSearchModeEnum.embedding;
  usingReRank = usingReRank && global.reRankModels.length > 0;

  // Compatible with topk limit
  if (maxTokens < 50) {
    maxTokens = 1500;
  }
  let set = new Set<string>();
  let usingSimilarityFilter = false;

  /* function */
  const countRecallLimit = () => {
    if (searchMode === DatasetSearchModeEnum.embedding) {
      return {
        embeddingLimit: 100,
        fullTextLimit: 0
      };
    }
    if (searchMode === DatasetSearchModeEnum.fullTextRecall) {
      return {
        embeddingLimit: 0,
        fullTextLimit: 100
      };
    }
    return {
      embeddingLimit: 60,
      fullTextLimit: 40
    };
  };
  const embeddingRecall = async ({ query, limit }: { query: string; limit: number }) => {
    const { vectors, tokens } = await getVectorsByText({
      model: getVectorModel(model),
      input: query
    });

    const { results } = await recallFromVectorStore({
      vectors,
      limit,
      datasetIds,
      efSearch: global.systemEnv?.pgHNSWEfSearch
    });

    // get q and a
    const dataList = (await MongoDatasetData.find(
      {
        teamId,
        datasetId: { $in: datasetIds },
        'indexes.dataId': { $in: results.map((item) => item.id?.trim()) }
      },
      'datasetId collectionId q a chunkIndex indexes'
    )
      .populate('collectionId', 'name fileId rawLink')
      .lean()) as DatasetDataWithCollectionType[];

    // add score to data (results are already sorted; the first one has the highest score)
    const concatResults = dataList.map((data) => {
      const dataIdList = data.indexes.map((item) => item.dataId);

      const maxScoreResult = results.find((item) => {
        return dataIdList.includes(item.id);
      });

      return {
        ...data,
        score: maxScoreResult?.score || 0
      };
    });

    concatResults.sort((a, b) => b.score - a.score);

    const formatResult = concatResults
      .map((data, index) => {
        if (!data.collectionId) {
          console.log('Collection is not found', data);
        }

        const result: SearchDataResponseItemType = {
          id: String(data._id),
          q: data.q,
          a: data.a,
          chunkIndex: data.chunkIndex,
          datasetId: String(data.datasetId),
          collectionId: String(data.collectionId?._id),
          sourceName: data.collectionId?.name || '',
          sourceId: data.collectionId?.fileId || data.collectionId?.rawLink,
          score: [{ type: SearchScoreTypeEnum.embedding, value: data.score, index }]
        };

        return result;
      })
      .filter((item) => item !== null) as SearchDataResponseItemType[];

    return {
      embeddingRecallResults: formatResult,
      tokens
    };
  };
  const fullTextRecall = async ({
    query,
    limit
  }: {
    query: string;
    limit: number;
  }): Promise<{
    fullTextRecallResults: SearchDataResponseItemType[];
    tokenLen: number;
  }> => {
    if (limit === 0) {
      return {
        fullTextRecallResults: [],
        tokenLen: 0
      };
    }

    let searchResults = (
      await Promise.all(
        datasetIds.map((id) =>
          MongoDatasetData.find(
            {
              teamId,
              datasetId: id,
              $text: { $search: jiebaSplit({ text: query }) }
            },
            {
              score: { $meta: 'textScore' },
              _id: 1,
              datasetId: 1,
              collectionId: 1,
              q: 1,
              a: 1,
              chunkIndex: 1
            }
          )
            .sort({ score: { $meta: 'textScore' } })
            .limit(limit)
            .lean()
        )
      )
    ).flat() as (DatasetDataSchemaType & { score: number })[];

    // resort across datasets and trim to the limit
    searchResults.sort((a, b) => b.score - a.score);
    searchResults = searchResults.slice(0, limit);

    const collections = await MongoDatasetCollection.find(
      {
        _id: { $in: searchResults.map((item) => item.collectionId) }
      },
      '_id name fileId rawLink'
    );

    return {
      fullTextRecallResults: searchResults.map((item, index) => {
        const collection = collections.find((col) => String(col._id) === String(item.collectionId));
        return {
          id: String(item._id),
          datasetId: String(item.datasetId),
          collectionId: String(item.collectionId),
          sourceName: collection?.name || '',
          sourceId: collection?.fileId || collection?.rawLink,
          q: item.q,
          a: item.a,
          chunkIndex: item.chunkIndex,
          indexes: item.indexes,
          score: [{ type: SearchScoreTypeEnum.fullText, value: item.score, index }]
        };
      }),
      tokenLen: 0
    };
  };
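  // Note: the $text recall above depends on a MongoDB text index covering the
  // q/a fields; jiebaSplit pre-segments CJK text into space-separated tokens so
  // that the whitespace-based $text tokenizer can match it.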
  const reRankSearchResult = async ({
    data,
    query
  }: {
    data: SearchDataResponseItemType[];
    query: string;
  }): Promise<SearchDataResponseItemType[]> => {
    try {
      const results = await reRankRecall({
        query,
        inputs: data.map((item) => ({
          id: item.id,
          text: `${item.q}\n${item.a}`
        }))
      });

      if (results.length === 0) {
        usingReRank = false;
        return [];
      }

      // add new score to data
      const mergeResult = results
        .map((item, index) => {
          const target = data.find((dataItem) => dataItem.id === item.id);
          if (!target) return null;
          const score = item.score || 0;

          return {
            ...target,
            score: [{ type: SearchScoreTypeEnum.reRank, value: score, index }]
          };
        })
        .filter(Boolean) as SearchDataResponseItemType[];

      return mergeResult;
    } catch (error) {
      usingReRank = false;
      return [];
    }
  };
  const filterResultsByMaxTokens = (list: SearchDataResponseItemType[], maxTokens: number) => {
    const results: SearchDataResponseItemType[] = [];
    let totalTokens = 0;

    for (let i = 0; i < list.length; i++) {
      const item = list[i];
      totalTokens += countPromptTokens(item.q + item.a);
      if (totalTokens > maxTokens + 500) {
        break;
      }
      results.push(item);
      if (totalTokens > maxTokens) {
        break;
      }
    }

    return results.length === 0 ? list.slice(0, 1) : results;
  };
  const multiQueryRecall = async ({
    embeddingLimit,
    fullTextLimit
  }: {
    embeddingLimit: number;
    fullTextLimit: number;
  }) => {
    // multi query recall
    const embeddingRecallResList: SearchDataResponseItemType[][] = [];
    const fullTextRecallResList: SearchDataResponseItemType[][] = [];
    let totalTokens = 0;

    await Promise.all(
      queries.map(async (query) => {
        const [{ tokens, embeddingRecallResults }, { fullTextRecallResults }] = await Promise.all([
          embeddingRecall({
            query,
            limit: embeddingLimit
          }),
          fullTextRecall({
            query,
            limit: fullTextLimit
          })
        ]);
        totalTokens += tokens;

        embeddingRecallResList.push(embeddingRecallResults);
        fullTextRecallResList.push(fullTextRecallResults);
      })
    );

    // rrf concat
    const rrfEmbRecall = datasetSearchResultConcat(
      embeddingRecallResList.map((list) => ({ k: 60, list }))
    ).slice(0, embeddingLimit);
    const rrfFTRecall = datasetSearchResultConcat(
      fullTextRecallResList.map((list) => ({ k: 60, list }))
    ).slice(0, fullTextLimit);

    return {
      tokens: totalTokens,
      embeddingRecallResults: rrfEmbRecall,
      fullTextRecallResults: rrfFTRecall
    };
  };
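  // datasetSearchResultConcat fuses the per-query lists with reciprocal rank
  // fusion: each item contributes 1 / (k + rank) in every list it appears in,
  // the contributions are summed, and the fused list is re-sorted (standard
  // RRF; the exact implementation lives in @fastgpt/global/core/dataset/search/utils).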

  /* main step */
  // count limit
  const { embeddingLimit, fullTextLimit } = countRecallLimit();

  // recall
  const { embeddingRecallResults, fullTextRecallResults, tokens } = await multiQueryRecall({
    embeddingLimit,
    fullTextLimit
  });

  // ReRank results
  const reRankResults = await (async () => {
    if (!usingReRank) return [];

    set = new Set<string>(embeddingRecallResults.map((item) => item.id));
    const concatRecallResults = embeddingRecallResults.concat(
      fullTextRecallResults.filter((item) => !set.has(item.id))
    );

    // remove same q and a data
    set = new Set<string>();
    const filterSameDataResults = concatRecallResults.filter((item) => {
      // strip all punctuation and whitespace; compare the text only
      const str = hashStr(`${item.q}${item.a}`.replace(/[^\p{L}\p{N}]/gu, ''));
      if (set.has(str)) return false;
      set.add(str);
      return true;
    });
    return reRankSearchResult({
      query: reRankQuery,
      data: filterSameDataResults
    });
  })();

  // embedding recall and fullText recall rrf concat
  const rrfConcatResults = datasetSearchResultConcat([
    { k: 60, list: embeddingRecallResults },
    { k: 64, list: fullTextRecallResults },
    { k: 60, list: reRankResults }
  ]);

  // remove same q and a data
  set = new Set<string>();
  const filterSameDataResults = rrfConcatResults.filter((item) => {
    // strip all punctuation and whitespace; compare the text only
    const str = hashStr(`${item.q}${item.a}`.replace(/[^\p{L}\p{N}]/gu, ''));
    if (set.has(str)) return false;
    set.add(str);
    return true;
  });

  // score filter
  const scoreFilter = (() => {
    if (usingReRank) {
      usingSimilarityFilter = true;

      return filterSameDataResults.filter((item) => {
        const reRankScore = item.score.find((item) => item.type === SearchScoreTypeEnum.reRank);
        if (reRankScore && reRankScore.value < similarity) return false;
        return true;
      });
    }
    if (searchMode === DatasetSearchModeEnum.embedding) {
      usingSimilarityFilter = true;
      return filterSameDataResults.filter((item) => {
        const embeddingScore = item.score.find(
          (item) => item.type === SearchScoreTypeEnum.embedding
        );
        if (embeddingScore && embeddingScore.value < similarity) return false;
        return true;
      });
    }
    return filterSameDataResults;
  })();

  return {
    searchRes: filterResultsByMaxTokens(scoreFilter, maxTokens),
    tokens,
    searchMode,
    limit: maxTokens,
    similarity,
    usingReRank,
    usingSimilarityFilter
  };
}
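A hedged sketch of calling the hybrid search above (ids and the model name are placeholders; mixedRecall is the assumed name of the "both recalls" mode implied by the default branch of countRecallLimit):

const { searchRes, usingSimilarityFilter } = await searchDatasetData({
  teamId: 'team-id',
  reRankQuery: 'user question',
  queries: ['user question'], // one recall pass per query, fused with RRF
  model: 'embedding-model-id',
  similarity: 0.5,
  limit: 3000, // token budget for the returned quotes
  datasetIds: ['dataset-id'],
  searchMode: DatasetSearchModeEnum.mixedRecall, // assumption
  usingReRank: true
});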
@@ -24,7 +24,7 @@ export const createDatasetTrainingMongoWatch = () => {

export const startTrainingQueue = (fast?: boolean) => {
  const max = global.systemEnv?.qaMaxProcess || 10;
  for (let i = 0; i < max; i++) {
  for (let i = 0; i < (fast ? max : 1); i++) {
    generateQA();
    generateVector();
  }
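Read as a before/after pair: the old loop always spawned qaMaxProcess (default 10) QA and vector workers at once; the new loop spawns them all only when fast is set, and otherwise starts a single worker, letting the queue ramp up as finished tasks re-trigger themselves.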
@@ -6,14 +6,14 @@ import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type.d'
import { addLog } from '@fastgpt/service/common/system/log';
import { splitText2Chunks } from '@fastgpt/global/common/string/textSplitter';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { Prompt_AgentQA } from '@/global/core/prompt/agent';
import { Prompt_AgentQA } from '@fastgpt/global/core/ai/prompt/agent';
import type { PushDatasetDataChunkProps } from '@fastgpt/global/core/dataset/api.d';
import { pushDataToTrainingQueue } from '@/service/core/dataset/data/controller';
import { getLLMModel } from '@fastgpt/service/core/ai/model';
import { checkTeamAiPointsAndLock } from './utils';
import { checkInvalidChunkAndLock } from '@fastgpt/service/core/dataset/training/utils';
import { addMinutes } from 'date-fns';
import { countGptMessagesTokens } from '@fastgpt/global/common/string/tiktoken';
import { pushDataListToTrainingQueue } from '@fastgpt/service/core/dataset/training/controller';

const reduceQueue = () => {
  global.qaQueueLen = global.qaQueueLen > 0 ? global.qaQueueLen - 1 : 0;
@@ -128,7 +128,7 @@ ${replaceVariable(Prompt_AgentQA.fixedText, { text })}`;
  });

  // get vector and insert
  const { insertLen } = await pushDataToTrainingQueue({
  const { insertLen } = await pushDataListToTrainingQueue({
    teamId: data.teamId,
    tmbId: data.tmbId,
    collectionId: data.collectionId,
@@ -30,31 +30,26 @@ export async function generateVector(): Promise<any> {
  try {
    const data = await MongoDatasetTraining.findOneAndUpdate(
      {
        lockTime: { $lte: addMinutes(new Date(), -1) },
        mode: TrainingModeEnum.chunk
        mode: TrainingModeEnum.chunk,
        lockTime: { $lte: addMinutes(new Date(), -1) }
      },
      {
        lockTime: new Date()
      }
    )
      .sort({
        weight: -1
      })
      .select({
        _id: 1,
        userId: 1,
        teamId: 1,
        tmbId: 1,
        datasetId: 1,
        collectionId: 1,
        q: 1,
        a: 1,
        chunkIndex: 1,
        indexes: 1,
        model: 1,
        billId: 1
      })
      .lean();
    ).select({
      _id: 1,
      userId: 1,
      teamId: 1,
      tmbId: 1,
      datasetId: 1,
      collectionId: 1,
      q: 1,
      a: 1,
      chunkIndex: 1,
      indexes: 1,
      model: 1,
      billId: 1
    });

    // task preemption
    if (!data) {
@@ -102,7 +97,7 @@ export async function generateVector(): Promise<any> {
  try {
    // invalid data
    if (!data.q.trim()) {
      await MongoDatasetTraining.findByIdAndDelete(data._id);
      await data.deleteOne();
      reduceQueue();
      generateVector();
      return;
@@ -131,7 +126,7 @@ export async function generateVector(): Promise<any> {
    });

    // delete data from training
    await MongoDatasetTraining.findByIdAndDelete(data._id);
    await data.deleteOne();
    reduceQueue();
    generateVector();
@@ -1,319 +0,0 @@
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { filterGPTMessageByMaxTokens } from '@fastgpt/service/core/chat/utils';
import {
  countGptMessagesTokens,
  countMessagesTokens
} from '@fastgpt/global/common/string/tiktoken';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { getAIApi } from '@fastgpt/service/core/ai/config';
import type { ClassifyQuestionAgentItemType } from '@fastgpt/global/core/module/type.d';
import { ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { Prompt_CQJson } from '@/global/core/prompt/agent';
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { ModelTypeEnum, getLLMModel } from '@fastgpt/service/core/ai/model';
import { getHistories } from '../utils';
import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import {
  ChatCompletionCreateParams,
  ChatCompletionMessageParam,
  ChatCompletionTool
} from '@fastgpt/global/core/ai/type';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';

type Props = ModuleDispatchProps<{
  [ModuleInputKeyEnum.aiModel]: string;
  [ModuleInputKeyEnum.aiSystemPrompt]?: string;
  [ModuleInputKeyEnum.history]?: ChatItemType[] | number;
  [ModuleInputKeyEnum.userChatInput]: string;
  [ModuleInputKeyEnum.agents]: ClassifyQuestionAgentItemType[];
}>;
type CQResponse = DispatchNodeResultType<{
  [key: string]: any;
}>;
type ActionProps = Props & { cqModel: LLMModelItemType };

const agentFunName = 'classify_question';

/* request openai chat */
export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse> => {
  const {
    user,
    module: { name },
    histories,
    params: { model, history = 6, agents, userChatInput }
  } = props as Props;

  if (!userChatInput) {
    return Promise.reject('Input is empty');
  }

  const cqModel = getLLMModel(model);

  const chatHistories = getHistories(history, histories);

  const { arg, tokens } = await (async () => {
    if (cqModel.toolChoice) {
      return toolChoice({
        ...props,
        histories: chatHistories,
        cqModel
      });
    }
    if (cqModel.functionCall) {
      return functionCall({
        ...props,
        histories: chatHistories,
        cqModel
      });
    }
    return completions({
      ...props,
      histories: chatHistories,
      cqModel
    });
  })();

  const result = agents.find((item) => item.key === arg?.type) || agents[agents.length - 1];

  const { totalPoints, modelName } = formatModelChars2Points({
    model: cqModel.model,
    tokens,
    modelType: ModelTypeEnum.llm
  });

  return {
    [result.key]: true,
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
      model: modelName,
      query: userChatInput,
      tokens,
      cqList: agents,
      cqResult: result.value,
      contextTotalLen: chatHistories.length + 2
    },
    [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
      {
        moduleName: name,
        totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
        model: modelName,
        tokens
      }
    ]
  };
};

const getFunctionCallSchema = ({
  cqModel,
  histories,
  params: { agents, systemPrompt, userChatInput }
}: ActionProps) => {
  const messages: ChatItemType[] = [
    ...histories,
    {
      obj: ChatRoleEnum.Human,
      value: [
        {
          type: ChatItemValueTypeEnum.text,
          text: {
            content: systemPrompt
              ? `<背景知识>
${systemPrompt}
</背景知识>

问题: "${userChatInput}"
`
              : userChatInput
          }
        }
      ]
    }
  ];

  const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
  const filterMessages = filterGPTMessageByMaxTokens({
    messages: adaptMessages,
    maxTokens: cqModel.maxContext
  });

  // function body
  const agentFunction = {
    name: agentFunName,
    description: '结合对话记录及背景知识,对问题进行分类,并返回对应的类型字段',
    parameters: {
      type: 'object',
      properties: {
        type: {
          type: 'string',
          description: `问题类型。下面是几种可选的问题类型: ${agents
            .map((item) => `${item.value},返回:'${item.key}'`)
            .join(';')}`,
          enum: agents.map((item) => item.key)
        }
      },
      required: ['type']
    }
  };

  return {
    agentFunction,
    filterMessages
  };
};

const toolChoice = async (props: ActionProps) => {
  const { user, cqModel } = props;

  const { agentFunction, filterMessages } = getFunctionCallSchema(props);
  // function body
  const tools: ChatCompletionTool[] = [
    {
      type: 'function',
      function: agentFunction
    }
  ];

  const ai = getAIApi({
    userKey: user.openaiAccount,
    timeout: 480000
  });

  const response = await ai.chat.completions.create({
    model: cqModel.model,
    temperature: 0,
    messages: filterMessages,
    tools,
    tool_choice: { type: 'function', function: { name: agentFunName } }
  });

  try {
    const arg = JSON.parse(
      response?.choices?.[0]?.message?.tool_calls?.[0]?.function?.arguments || ''
    );
    const completeMessages: ChatCompletionMessageParam[] = [
      ...filterMessages,
      {
        role: ChatCompletionRequestMessageRoleEnum.Assistant,
        tool_calls: response.choices?.[0]?.message?.tool_calls
      }
    ];

    return {
      arg,
      tokens: countGptMessagesTokens(completeMessages, tools)
    };
  } catch (error) {
    console.log(response.choices?.[0]?.message);

    console.log('Your model may not support tool_call', error);

    return {
      arg: {},
      tokens: 0
    };
  }
};

const functionCall = async (props: ActionProps) => {
  const { user, cqModel } = props;

  const { agentFunction, filterMessages } = getFunctionCallSchema(props);
  const functions: ChatCompletionCreateParams.Function[] = [agentFunction];

  const ai = getAIApi({
    userKey: user.openaiAccount,
    timeout: 480000
  });

  const response = await ai.chat.completions.create({
    model: cqModel.model,
    temperature: 0,
    messages: filterMessages,
    function_call: {
      name: agentFunName
    },
    functions
  });

  try {
    const arg = JSON.parse(response?.choices?.[0]?.message?.function_call?.arguments || '');
    const completeMessages: ChatCompletionMessageParam[] = [
      ...filterMessages,
      {
        role: ChatCompletionRequestMessageRoleEnum.Assistant,
        function_call: response.choices?.[0]?.message?.function_call
      }
    ];

    return {
      arg,
      tokens: countGptMessagesTokens(completeMessages, undefined, functions)
    };
  } catch (error) {
    console.log(response.choices?.[0]?.message);

    console.log('Your model may not support tool_call', error);

    return {
      arg: {},
      tokens: 0
    };
  }
};

const completions = async ({
  cqModel,
  user,
  histories,
  params: { agents, systemPrompt = '', userChatInput }
}: ActionProps) => {
  const messages: ChatItemType[] = [
    {
      obj: ChatRoleEnum.Human,
      value: [
        {
          type: ChatItemValueTypeEnum.text,
          text: {
            content: replaceVariable(cqModel.customCQPrompt || Prompt_CQJson, {
              systemPrompt: systemPrompt || 'null',
              typeList: agents
                .map((item) => `{"questionType": "${item.value}", "typeId": "${item.key}"}`)
                .join('\n'),
              history: histories
                .map((item) => `${item.obj}:${chatValue2RuntimePrompt(item.value).text}`)
                .join('\n'),
              question: userChatInput
            })
          }
        }
      ]
    }
  ];

  const ai = getAIApi({
    userKey: user.openaiAccount,
    timeout: 480000
  });

  const data = await ai.chat.completions.create({
    model: cqModel.model,
    temperature: 0.01,
    messages: chats2GPTMessages({ messages, reserveId: false }),
    stream: false
  });
  const answer = data.choices?.[0].message?.content || '';

  const id =
    agents.find((item) => answer.includes(item.key) || answer.includes(item.value))?.key || '';

  return {
    tokens: countMessagesTokens(messages),
    arg: { type: id }
  };
};
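For reference, a hedged sketch of the schema the classifier sends when toolChoice is used (the agent keys and labels are illustrative):

// With agents = [{ key: 'greet', value: '打招呼' }, { key: 'other', value: '其他问题' }],
// getFunctionCallSchema produces roughly:
// {
//   name: 'classify_question',
//   parameters: {
//     type: 'object',
//     properties: {
//       type: { type: 'string', description: '...', enum: ['greet', 'other'] }
//     },
//     required: ['type']
//   }
// }
// The model is then forced to call it via
// tool_choice: { type: 'function', function: { name: 'classify_question' } }.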
@@ -1,375 +0,0 @@
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { filterGPTMessageByMaxTokens } from '@fastgpt/service/core/chat/utils';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import {
  countGptMessagesTokens,
  countMessagesTokens
} from '@fastgpt/global/common/string/tiktoken';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { getAIApi } from '@fastgpt/service/core/ai/config';
import type { ContextExtractAgentItemType } from '@fastgpt/global/core/module/type';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { Prompt_ExtractJson } from '@/global/core/prompt/agent';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getHistories } from '../utils';
import { ModelTypeEnum, getLLMModel } from '@fastgpt/service/core/ai/model';
import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/utils';
import json5 from 'json5';
import {
  ChatCompletionCreateParams,
  ChatCompletionMessageParam,
  ChatCompletionTool
} from '@fastgpt/global/core/ai/type';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';

type Props = ModuleDispatchProps<{
  [ModuleInputKeyEnum.history]?: ChatItemType[];
  [ModuleInputKeyEnum.contextExtractInput]: string;
  [ModuleInputKeyEnum.extractKeys]: ContextExtractAgentItemType[];
  [ModuleInputKeyEnum.description]: string;
  [ModuleInputKeyEnum.aiModel]: string;
}>;
type Response = DispatchNodeResultType<{
  [ModuleOutputKeyEnum.success]?: boolean;
  [ModuleOutputKeyEnum.failed]?: boolean;
  [ModuleOutputKeyEnum.contextExtractFields]: string;
}>;

type ActionProps = Props & { extractModel: LLMModelItemType };

const agentFunName = 'extract_json_data';

export async function dispatchContentExtract(props: Props): Promise<Response> {
  const {
    user,
    module: { name },
    histories,
    params: { content, history = 6, model, description, extractKeys }
  } = props;

  if (!content) {
    return Promise.reject('Input is empty');
  }

  const extractModel = getLLMModel(model);
  const chatHistories = getHistories(history, histories);

  const { arg, tokens } = await (async () => {
    if (extractModel.toolChoice) {
      return toolChoice({
        ...props,
        histories: chatHistories,
        extractModel
      });
    }
    if (extractModel.functionCall) {
      return functionCall({
        ...props,
        histories: chatHistories,
        extractModel
      });
    }
    return completions({
      ...props,
      histories: chatHistories,
      extractModel
    });
  })();

  // remove invalid keys
  for (let key in arg) {
    const item = extractKeys.find((item) => item.key === key);
    if (!item) {
      delete arg[key];
    }
    if (arg[key] === '') {
      delete arg[key];
    }
  }

  // auto fill required fields
  extractKeys.forEach((item) => {
    if (item.required && !arg[item.key]) {
      arg[item.key] = item.defaultValue || '';
    }
  });

  // validate: every expected field is present
  let success = !extractKeys.find((item) => !(item.key in arg));
  // validate: no unexpected fields remain
  if (success) {
    for (const key in arg) {
      const item = extractKeys.find((item) => item.key === key);
      if (!item) {
        success = false;
        break;
      }
    }
  }

  const { totalPoints, modelName } = formatModelChars2Points({
    model: extractModel.model,
    tokens,
    modelType: ModelTypeEnum.llm
  });

  return {
    [ModuleOutputKeyEnum.success]: success ? true : undefined,
    [ModuleOutputKeyEnum.failed]: success ? undefined : true,
    [ModuleOutputKeyEnum.contextExtractFields]: JSON.stringify(arg),
    ...arg,
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
      model: modelName,
      query: content,
      tokens,
      extractDescription: description,
      extractResult: arg,
      contextTotalLen: chatHistories.length + 2
    },
    [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
      {
        moduleName: name,
        totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
        model: modelName,
        tokens
      }
    ]
  };
}

const getFunctionCallSchema = ({
  extractModel,
  histories,
  params: { content, extractKeys, description }
}: ActionProps) => {
  const messages: ChatItemType[] = [
    ...histories,
    {
      obj: ChatRoleEnum.Human,
      value: [
        {
          type: ChatItemValueTypeEnum.text,
          text: {
            content: `你的任务是根据上下文获取适当的 JSON 字符串。要求:
"""
- 字符串不要换行。
- 结合上下文和当前问题进行获取。
"""

当前问题: "${content}"`
          }
        }
      ]
    }
  ];
  const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
  const filterMessages = filterGPTMessageByMaxTokens({
    messages: adaptMessages,
    maxTokens: extractModel.maxContext
  });

  const properties: Record<
    string,
    {
      type: string;
      description: string;
    }
  > = {};
  extractKeys.forEach((item) => {
    properties[item.key] = {
      type: 'string',
      description: item.desc,
      ...(item.enum ? { enum: item.enum.split('\n') } : {})
    };
  });
  // function body
  const agentFunction = {
    name: agentFunName,
    description,
    parameters: {
      type: 'object',
      properties
    }
  };

  return {
    filterMessages,
    agentFunction
  };
};

const toolChoice = async (props: ActionProps) => {
  const { user, extractModel } = props;

  const { filterMessages, agentFunction } = getFunctionCallSchema(props);

  const tools: ChatCompletionTool[] = [
    {
      type: 'function',
      function: agentFunction
    }
  ];

  const ai = getAIApi({
    userKey: user.openaiAccount,
    timeout: 480000
  });

  const response = await ai.chat.completions.create({
    model: extractModel.model,
    temperature: 0,
    messages: filterMessages,
    tools,
    tool_choice: { type: 'function', function: { name: agentFunName } }
  });

  const arg: Record<string, any> = (() => {
    try {
      return json5.parse(
        response?.choices?.[0]?.message?.tool_calls?.[0]?.function?.arguments || '{}'
      );
    } catch (error) {
      console.log(agentFunction.parameters);
      console.log(response.choices?.[0]?.message?.tool_calls?.[0]?.function);
      console.log('Your model may not support tool_call', error);
      return {};
    }
  })();

  const completeMessages: ChatCompletionMessageParam[] = [
    ...filterMessages,
    {
      role: ChatCompletionRequestMessageRoleEnum.Assistant,
      tool_calls: response.choices?.[0]?.message?.tool_calls
    }
  ];

  return {
    tokens: countGptMessagesTokens(completeMessages, tools),
    arg
  };
};

const functionCall = async (props: ActionProps) => {
  const { user, extractModel } = props;

  const { agentFunction, filterMessages } = getFunctionCallSchema(props);
  const functions: ChatCompletionCreateParams.Function[] = [agentFunction];

  const ai = getAIApi({
    userKey: user.openaiAccount,
    timeout: 480000
  });

  const response = await ai.chat.completions.create({
    model: extractModel.model,
    temperature: 0,
    messages: filterMessages,
    function_call: {
      name: agentFunName
    },
    functions
  });

  try {
    const arg = JSON.parse(response?.choices?.[0]?.message?.function_call?.arguments || '');
    const completeMessages: ChatCompletionMessageParam[] = [
      ...filterMessages,
      {
        role: ChatCompletionRequestMessageRoleEnum.Assistant,
        function_call: response.choices?.[0]?.message?.function_call
      }
    ];

    return {
      arg,
      tokens: countGptMessagesTokens(completeMessages, undefined, functions)
    };
  } catch (error) {
    console.log(response.choices?.[0]?.message);

    console.log('Your model may not support tool_call', error);

    return {
      arg: {},
      tokens: 0
    };
  }
};

const completions = async ({
  extractModel,
  user,
  histories,
  params: { content, extractKeys, description }
}: ActionProps) => {
  const messages: ChatItemType[] = [
    {
      obj: ChatRoleEnum.Human,
      value: [
        {
          type: ChatItemValueTypeEnum.text,
          text: {
            content: replaceVariable(extractModel.customExtractPrompt || Prompt_ExtractJson, {
              description,
              json: extractKeys
                .map(
                  (item) =>
                    `{"key":"${item.key}", "description":"${item.desc}"${
                      item.enum ? `, "enum":"[${item.enum.split('\n')}]"` : ''
                    }}`
                )
                .join('\n'),
              text: `${histories.map((item) => `${item.obj}:${chatValue2RuntimePrompt(item.value).text}`).join('\n')}
Human: ${content}`
            })
          }
        }
      ]
    }
  ];

  const ai = getAIApi({
    userKey: user.openaiAccount,
    timeout: 480000
  });
  const data = await ai.chat.completions.create({
    model: extractModel.model,
    temperature: 0.01,
    messages: chats2GPTMessages({ messages, reserveId: false }),
    stream: false
  });
  const answer = data.choices?.[0].message?.content || '';

  // parse response
  const start = answer.indexOf('{');
  const end = answer.lastIndexOf('}');

  if (start === -1 || end === -1)
    return {
      rawResponse: answer,
      tokens: countMessagesTokens(messages),
      arg: {}
    };

  try {
    return {
      rawResponse: answer,
      tokens: countMessagesTokens(messages),
      arg: json5.parse(answer) as Record<string, any>
    };
  } catch (error) {
    console.log(error);
    return {
      rawResponse: answer,
      tokens: countMessagesTokens(messages),
      arg: {}
    };
  }
};
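A hedged example of how extractKeys map onto the function-call schema above (the field names come from the code; the sample values are illustrative):

const extractKeys: ContextExtractAgentItemType[] = [
  { key: 'city', desc: 'City the user asks about', required: true, defaultValue: '', enum: '' },
  { key: 'date', desc: 'Date in YYYY-MM-DD form', required: false, defaultValue: '', enum: '' }
];
// getFunctionCallSchema turns these into:
// parameters.properties = {
//   city: { type: 'string', description: 'City the user asks about' },
//   date: { type: 'string', description: 'Date in YYYY-MM-DD form' }
// }
// A non-empty enum string would be split on '\n' into an enum array.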
@@ -1,359 +0,0 @@
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getAIApi } from '@fastgpt/service/core/ai/config';
import { filterGPTMessageByMaxTokens } from '@fastgpt/service/core/chat/utils';
import {
  ChatCompletion,
  StreamChatType,
  ChatCompletionMessageParam,
  ChatCompletionCreateParams,
  ChatCompletionMessageFunctionCall,
  ChatCompletionFunctionMessageParam,
  ChatCompletionAssistantMessageParam
} from '@fastgpt/global/core/ai/type';
import { NextApiResponse } from 'next';
import {
  responseWrite,
  responseWriteController,
  responseWriteNodeStatus
} from '@fastgpt/service/common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { dispatchWorkFlow } from '../../index';
import { DispatchToolModuleProps, RunToolResponse, ToolModuleItemType } from './type.d';
import json5 from 'json5';
import { DispatchFlowResponse } from '../../type';
import { countGptMessagesTokens } from '@fastgpt/global/common/string/tiktoken';
import { getNanoid } from '@fastgpt/global/common/string/tools';

type ToolRunResponseType = {
  moduleRunResponse: DispatchFlowResponse;
  functionCallMsg: ChatCompletionFunctionMessageParam;
}[];

export const runToolWithFunctionCall = async (
  props: DispatchToolModuleProps & {
    messages: ChatCompletionMessageParam[];
    toolModules: ToolModuleItemType[];
    toolModel: LLMModelItemType;
  },
  response?: RunToolResponse
): Promise<RunToolResponse> => {
  const {
    toolModel,
    toolModules,
    messages,
    res,
    runtimeModules,
    detail = false,
    module,
    stream
  } = props;

  const functions: ChatCompletionCreateParams.Function[] = toolModules.map((module) => {
    const properties: Record<
      string,
      {
        type: string;
        description: string;
        required?: boolean;
      }
    > = {};
    module.toolParams.forEach((item) => {
      properties[item.key] = {
        type: 'string',
        description: item.toolDescription || ''
      };
    });

    return {
      name: module.moduleId,
      description: module.intro,
      parameters: {
        type: 'object',
        properties,
        required: module.toolParams.filter((item) => item.required).map((item) => item.key)
      }
    };
  });

  const filterMessages = filterGPTMessageByMaxTokens({
    messages,
    maxTokens: toolModel.maxContext - 500 // filter by context token count, not the response maxToken
  });

  /* Run llm */
  const ai = getAIApi({
    timeout: 480000
  });
  const aiResponse = await ai.chat.completions.create(
    {
      ...toolModel?.defaultConfig,
      model: toolModel.model,
      temperature: 0,
      stream,
      messages: filterMessages,
      functions,
      function_call: 'auto'
    },
    {
      headers: {
        Accept: 'application/json, text/plain, */*'
      }
    }
  );

  const { answer, functionCalls } = await (async () => {
    if (stream) {
      return streamResponse({
        res,
        detail,
        toolModules,
        stream: aiResponse
      });
    } else {
      const result = aiResponse as ChatCompletion;
      const function_call = result.choices?.[0]?.message?.function_call;
      const toolModule = toolModules.find((module) => module.moduleId === function_call?.name);

      const toolCalls = function_call
        ? [
            {
              ...function_call,
              id: getNanoid(),
              toolName: toolModule?.name,
              toolAvatar: toolModule?.avatar
            }
          ]
        : [];

      return {
        answer: result.choices?.[0]?.message?.content || '',
        functionCalls: toolCalls
      };
    }
  })();

  // Run the selected tool.
  const toolsRunResponse = (
    await Promise.all(
      functionCalls.map(async (tool) => {
        if (!tool) return;

        const toolModule = toolModules.find((module) => module.moduleId === tool.name);

        if (!toolModule) return;

        const startParams = (() => {
          try {
            return json5.parse(tool.arguments);
          } catch (error) {
            return {};
          }
        })();

        const moduleRunResponse = await dispatchWorkFlow({
          ...props,
          runtimeModules: runtimeModules.map((module) => ({
            ...module,
            isEntry: module.moduleId === toolModule.moduleId
          })),
          startParams
        });

        const functionCallMsg: ChatCompletionFunctionMessageParam = {
          role: ChatCompletionRequestMessageRoleEnum.Function,
          name: tool.name,
          content: JSON.stringify(moduleRunResponse.toolResponses, null, 2)
        };

        if (stream && detail) {
          responseWrite({
            res,
            event: SseResponseEventEnum.toolResponse,
            data: JSON.stringify({
              tool: {
                id: tool.id,
                toolName: '',
                toolAvatar: '',
                params: '',
                response: JSON.stringify(moduleRunResponse.toolResponses, null, 2)
              }
            })
          });
        }

        return {
          moduleRunResponse,
          functionCallMsg
        };
      })
    )
  ).filter(Boolean) as ToolRunResponseType;

  const flatToolsResponseData = toolsRunResponse.map((item) => item.moduleRunResponse).flat();

  const functionCall = functionCalls[0];
  if (functionCall && !res.closed) {
    // Run the tool, combine its results, and perform another round of AI calls
    const assistantToolMsgParams: ChatCompletionAssistantMessageParam = {
      role: ChatCompletionRequestMessageRoleEnum.Assistant,
      function_call: functionCall
    };
    const concatToolMessages = [
      ...filterMessages,
      assistantToolMsgParams
    ] as ChatCompletionMessageParam[];

    const tokens = countGptMessagesTokens(concatToolMessages, undefined, functions);

    // console.log(tokens, 'tool');

    if (stream && detail) {
      responseWriteNodeStatus({
        res,
        name: module.name
      });
    }

    return runToolWithFunctionCall(
      {
        ...props,
        messages: [...concatToolMessages, ...toolsRunResponse.map((item) => item?.functionCallMsg)]
      },
      {
        dispatchFlowResponse: response
          ? response.dispatchFlowResponse.concat(flatToolsResponseData)
          : flatToolsResponseData,
        totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens
      }
    );
  } else {
    // No tool is invoked, indicating that the process is over
    const completeMessages = filterMessages.concat({
      role: ChatCompletionRequestMessageRoleEnum.Assistant,
      content: answer
    });

    const tokens = countGptMessagesTokens(completeMessages, undefined, functions);

    // console.log(tokens, 'response token');

    return {
      dispatchFlowResponse: response?.dispatchFlowResponse || [],
      totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
      completeMessages
    };
  }
};

async function streamResponse({
  res,
  detail,
  toolModules,
  stream
}: {
  res: NextApiResponse;
  detail: boolean;
  toolModules: ToolModuleItemType[];
  stream: StreamChatType;
}) {
  const write = responseWriteController({
    res,
    readStream: stream
  });

  let textAnswer = '';
  let functionCalls: ChatCompletionMessageFunctionCall[] = [];
  let functionId = getNanoid();

  for await (const part of stream) {
    if (res.closed) {
      stream.controller?.abort();
      break;
    }

    const responseChoice = part.choices?.[0]?.delta;
    if (responseChoice.content) {
      const content = responseChoice?.content || '';
      textAnswer += content;

      responseWrite({
        write,
        event: detail ? SseResponseEventEnum.answer : undefined,
        data: textAdaptGptResponse({
          text: content
        })
      });
    } else if (responseChoice.function_call) {
      const functionCall: {
        arguments: string;
        name?: string;
      } = responseChoice.function_call;

      // A streamed response returns only one function at a time; a name field marks the start of a new function call
      if (functionCall?.name) {
        functionId = getNanoid();
        const toolModule = toolModules.find((module) => module.moduleId === functionCall?.name);

        if (toolModule) {
          if (functionCall?.arguments === undefined) {
            functionCall.arguments = '';
          }
          functionCalls.push({
            ...functionCall,
            id: functionId,
            name: functionCall.name,
            toolName: toolModule.name,
            toolAvatar: toolModule.avatar
          });

          if (detail) {
            responseWrite({
              write,
              event: SseResponseEventEnum.toolCall,
              data: JSON.stringify({
                tool: {
                  id: functionId,
                  toolName: toolModule.name,
                  toolAvatar: toolModule.avatar,
                  functionName: functionCall.name,
                  params: functionCall.arguments,
                  response: ''
                }
              })
            });
          }
        }
      }
      /* append the streamed argument fragment to the last tool call */
      const arg: string = functionCall?.arguments || '';
      const currentTool = functionCalls[functionCalls.length - 1];
      if (currentTool) {
        currentTool.arguments += arg;

        if (detail) {
          responseWrite({
            write,
            event: SseResponseEventEnum.toolParams,
            data: JSON.stringify({
              tool: {
                id: functionId,
                toolName: '',
                toolAvatar: '',
                params: arg,
                response: ''
              }
            })
          });
        }
      }
    }
  }

  if (!textAnswer && functionCalls.length === 0) {
    return Promise.reject('LLM api response empty');
  }

  return { answer: textAnswer, functionCalls };
}
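A summary of the message protocol the recursion above maintains (roles as in the code; content shapes are illustrative):

// Each tool round appends, before the next LLM call:
//   { role: 'assistant', function_call: { name: '<moduleId>', arguments: '{...}' } }
//   { role: 'function', name: '<moduleId>', content: JSON.stringify(toolResponses) }
// runToolWithFunctionCall then calls itself with the grown message list and stops
// recursing when the model returns plain content instead of a function_call.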
@@ -1,147 +0,0 @@
import { ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import type {
  DispatchNodeResultType,
  RunningModuleItemType
} from '@fastgpt/global/core/module/runtime/type';
import { ModelTypeEnum, getLLMModel } from '@fastgpt/service/core/ai/model';
import { getHistories } from '../../utils';
import { runToolWithToolChoice } from './toolChoice';
import { DispatchToolModuleProps, ToolModuleItemType } from './type.d';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import {
  GPTMessages2Chats,
  chats2GPTMessages,
  getSystemPrompt,
  runtimePrompt2ChatsValue
} from '@fastgpt/global/core/chat/adapt';
import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/utils';
import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
import { runToolWithFunctionCall } from './functionCall';

type Response = DispatchNodeResultType<{}>;

export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
  const {
    module: { name, outputs },
    runtimeModules,
    histories,
    params: { model, systemPrompt, userChatInput, history = 6 }
  } = props;

  const toolModel = getLLMModel(model);
  const chatHistories = getHistories(history, histories);

  /* get tool params */

  // get tool output targets
  const toolOutput = outputs.find((output) => output.key === ModuleOutputKeyEnum.selectedTools);

  if (!toolOutput) {
    return Promise.reject('No tool output found');
  }

  const targets = toolOutput.targets;

  // Get the modules this tool output is connected to
  const toolModules = targets
    .map((item) => {
      const tool = runtimeModules.find((module) => module.moduleId === item.moduleId);
      return tool;
    })
    .filter(Boolean)
    .map<ToolModuleItemType>((tool) => {
      const toolParams = tool?.inputs.filter((input) => !!input.toolDescription) || [];
      return {
        ...(tool as RunningModuleItemType),
        toolParams
      };
    });

  const messages: ChatItemType[] = [
    ...getSystemPrompt(systemPrompt),
    ...chatHistories,
    {
      obj: ChatRoleEnum.Human,
      value: runtimePrompt2ChatsValue({
        text: userChatInput,
        files: []
      })
    }
  ];

  const {
    dispatchFlowResponse,
    totalTokens,
    completeMessages = []
  } = await (async () => {
    if (toolModel.toolChoice) {
      return runToolWithToolChoice({
        ...props,
        toolModules,
        toolModel,
        messages: chats2GPTMessages({ messages, reserveId: false })
      });
    }
    if (toolModel.functionCall) {
      return runToolWithFunctionCall({
        ...props,
        toolModules,
        toolModel,
        messages: chats2GPTMessages({ messages, reserveId: false })
      });
    }
    return {
      dispatchFlowResponse: [],
      totalTokens: 0,
      completeMessages: []
    };
  })();

  const { totalPoints, modelName } = formatModelChars2Points({
    model,
    tokens: totalTokens,
    modelType: ModelTypeEnum.llm
  });

  const adaptMessages = GPTMessages2Chats(completeMessages);
  //@ts-ignore
  const startIndex = adaptMessages.findLastIndex((item) => item.obj === ChatRoleEnum.Human);
  const assistantResponse = adaptMessages.slice(startIndex + 1);

  // flatten child tool responses
  const childToolResponse = dispatchFlowResponse.map((item) => item.flowResponses).flat();

  // concat tool usage
  const totalPointsUsage =
    totalPoints +
    dispatchFlowResponse.reduce((sum, item) => {
      const childrenTotal = item.flowUsages.reduce((sum, item) => sum + item.totalPoints, 0);
      return sum + childrenTotal;
    }, 0);
  const flatUsages = dispatchFlowResponse.map((item) => item.flowUsages).flat();

  return {
    [DispatchNodeResponseKeyEnum.assistantResponses]: assistantResponse
      .map((item) => item.value)
      .flat(),
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      totalPoints: totalPointsUsage,
      toolCallTokens: totalTokens,
      model: modelName,
      query: userChatInput,
      historyPreview: getHistoryPreview(GPTMessages2Chats(completeMessages, false)),
      toolDetail: childToolResponse
    },
    [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
      {
        moduleName: name,
        totalPoints,
        model: modelName,
        tokens: totalTokens
      },
      ...flatUsages
    ]
  };
};
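The node response above bills the parent LLM call plus every child tool run. A worked sketch of that aggregation with made-up numbers (the shapes mirror flowUsages, not the real types):

type Usage = { totalPoints: number };
type ChildRun = { flowUsages: Usage[] };

const parentPoints = 5;
const children: ChildRun[] = [
  { flowUsages: [{ totalPoints: 2 }, { totalPoints: 1 }] }, // first tool run
  { flowUsages: [{ totalPoints: 4 }] } // second tool run
];

const total =
  parentPoints +
  children.reduce((sum, c) => sum + c.flowUsages.reduce((s, u) => s + u.totalPoints, 0), 0);

console.log(total); // 12 = 5 + (2 + 1) + 4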
@@ -1,371 +0,0 @@
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getAIApi } from '@fastgpt/service/core/ai/config';
import { filterGPTMessageByMaxTokens } from '@fastgpt/service/core/chat/utils';
import {
  ChatCompletion,
  ChatCompletionMessageToolCall,
  StreamChatType,
  ChatCompletionToolMessageParam,
  ChatCompletionAssistantToolParam,
  ChatCompletionMessageParam,
  ChatCompletionTool
} from '@fastgpt/global/core/ai/type';
import { NextApiResponse } from 'next';
import {
  responseWrite,
  responseWriteController,
  responseWriteNodeStatus
} from '@fastgpt/service/common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { dispatchWorkFlow } from '../../index';
import { DispatchToolModuleProps, RunToolResponse, ToolModuleItemType } from './type.d';
import json5 from 'json5';
import { DispatchFlowResponse } from '../../type';
import { countGptMessagesTokens } from '@fastgpt/global/common/string/tiktoken';

type ToolRunResponseType = {
  moduleRunResponse: DispatchFlowResponse;
  toolMsgParams: ChatCompletionToolMessageParam;
}[];

export const runToolWithToolChoice = async (
  props: DispatchToolModuleProps & {
    messages: ChatCompletionMessageParam[];
    toolModules: ToolModuleItemType[];
    toolModel: LLMModelItemType;
  },
  response?: RunToolResponse
): Promise<RunToolResponse> => {
  const {
    toolModel,
    toolModules,
    messages,
    res,
    runtimeModules,
    detail = false,
    module,
    stream
  } = props;

  const tools: ChatCompletionTool[] = toolModules.map((module) => {
    const properties: Record<
      string,
      {
        type: string;
        description: string;
        required?: boolean;
      }
    > = {};
    module.toolParams.forEach((item) => {
      properties[item.key] = {
        type: 'string',
        description: item.toolDescription || ''
      };
    });

    return {
      type: 'function',
      function: {
        name: module.moduleId,
        description: module.intro,
        parameters: {
          type: 'object',
          properties,
          required: module.toolParams.filter((item) => item.required).map((item) => item.key)
        }
      }
    };
  });
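To make the mapping above concrete: a module whose inputs carry a toolDescription becomes one OpenAI tool definition, keyed by its moduleId. For a hypothetical dataset-search module with a single required `query` input, the generated entry would look roughly like:

// Illustrative output only; the name is a moduleId, not a human-readable label.
const exampleTool = {
  type: 'function',
  function: {
    name: 'module-abc123',
    description: 'Search the knowledge base',
    parameters: {
      type: 'object',
      properties: {
        query: { type: 'string', description: 'The search keywords' }
      },
      required: ['query']
    }
  }
};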
  const filterMessages = filterGPTMessageByMaxTokens({
    messages,
    maxTokens: toolModel.maxContext - 300 // reserve tokens for the request; the response maxToken is handled separately
  });

  /* Run llm */
  const ai = getAIApi({
    timeout: 480000
  });
  const aiResponse = await ai.chat.completions.create(
    {
      ...toolModel?.defaultConfig,
      model: toolModel.model,
      temperature: 0,
      stream,
      messages: filterMessages,
      tools,
      tool_choice: 'auto'
    },
    {
      headers: {
        Accept: 'application/json, text/plain, */*'
      }
    }
  );

  const { answer, toolCalls } = await (async () => {
    if (stream) {
      return streamResponse({
        res,
        detail,
        toolModules,
        stream: aiResponse
      });
    } else {
      const result = aiResponse as ChatCompletion;
      const calls = result.choices?.[0]?.message?.tool_calls || [];

      // attach tool name and avatar
      const toolCalls = calls.map((tool) => {
        const toolModule = toolModules.find((module) => module.moduleId === tool.function?.name);
        return {
          ...tool,
          toolName: toolModule?.name || '',
          toolAvatar: toolModule?.avatar || ''
        };
      });

      return {
        answer: result.choices?.[0]?.message?.content || '',
        toolCalls: toolCalls
      };
    }
  })();

  // Run the selected tools.
  const toolsRunResponse = (
    await Promise.all(
      toolCalls.map(async (tool) => {
        const toolModule = toolModules.find((module) => module.moduleId === tool.function?.name);

        if (!toolModule) return;

        const startParams = (() => {
          try {
            return json5.parse(tool.function.arguments);
          } catch (error) {
            return {};
          }
        })();

        const moduleRunResponse = await dispatchWorkFlow({
          ...props,
          runtimeModules: runtimeModules.map((module) => ({
            ...module,
            isEntry: module.moduleId === toolModule.moduleId
          })),
          startParams
        });

        const toolMsgParams: ChatCompletionToolMessageParam = {
          tool_call_id: tool.id,
          role: ChatCompletionRequestMessageRoleEnum.Tool,
          name: tool.function.name,
          content: JSON.stringify(moduleRunResponse.toolResponses, null, 2)
        };

        if (stream && detail) {
          responseWrite({
            res,
            event: SseResponseEventEnum.toolResponse,
            data: JSON.stringify({
              tool: {
                id: tool.id,
                toolName: '',
                toolAvatar: '',
                params: '',
                response: JSON.stringify(moduleRunResponse.toolResponses, null, 2)
              }
            })
          });
        }

        return {
          moduleRunResponse,
          toolMsgParams
        };
      })
    )
  ).filter(Boolean) as ToolRunResponseType;

  const flatToolsResponseData = toolsRunResponse.map((item) => item.moduleRunResponse).flat();

  if (toolCalls.length > 0 && !res.closed) {
    // Run the tools, append their results, and start another round of AI calls
    const assistantToolMsgParams: ChatCompletionAssistantToolParam = {
      role: ChatCompletionRequestMessageRoleEnum.Assistant,
      tool_calls: toolCalls
    };
    const concatToolMessages = [
      ...filterMessages,
      assistantToolMsgParams
    ] as ChatCompletionMessageParam[];

    const tokens = countGptMessagesTokens(concatToolMessages, tools);

    if (stream && detail) {
      responseWriteNodeStatus({
        res,
        name: module.name
      });
    }

    return runToolWithToolChoice(
      {
        ...props,
        messages: [...concatToolMessages, ...toolsRunResponse.map((item) => item?.toolMsgParams)]
      },
      {
        dispatchFlowResponse: response
          ? response.dispatchFlowResponse.concat(flatToolsResponseData)
          : flatToolsResponseData,
        totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens
      }
    );
  } else {
    // No tool was invoked, so this round ends the loop
    const completeMessages = filterMessages.concat({
      role: ChatCompletionRequestMessageRoleEnum.Assistant,
      content: answer
    });

    const tokens = countGptMessagesTokens(completeMessages, tools);

    return {
      dispatchFlowResponse: response?.dispatchFlowResponse || [],
      totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
      completeMessages
    };
  }
};

async function streamResponse({
  res,
  detail,
  toolModules,
  stream
}: {
  res: NextApiResponse;
  detail: boolean;
  toolModules: ToolModuleItemType[];
  stream: StreamChatType;
}) {
  const write = responseWriteController({
    res,
    readStream: stream
  });

  let textAnswer = '';
  let toolCalls: ChatCompletionMessageToolCall[] = [];

  for await (const part of stream) {
    if (res.closed) {
      stream.controller?.abort();
      break;
    }

    const responseChoice = part.choices?.[0]?.delta;
    if (responseChoice.content) {
      const content = responseChoice?.content || '';
      textAnswer += content;

      responseWrite({
        write,
        event: detail ? SseResponseEventEnum.answer : undefined,
        data: textAdaptGptResponse({
          text: content
        })
      });
    } else if (responseChoice.tool_calls?.[0]) {
      const toolCall: ChatCompletionMessageToolCall = responseChoice.tool_calls[0];

      // A stream response returns at most one tool per chunk. An id marks the start of a new tool call.
      if (toolCall.id) {
        const toolModule = toolModules.find(
          (module) => module.moduleId === toolCall.function?.name
        );

        if (toolModule) {
          if (toolCall.function?.arguments === undefined) {
            toolCall.function.arguments = '';
          }
          toolCalls.push({
            ...toolCall,
            toolName: toolModule.name,
            toolAvatar: toolModule.avatar
          });

          if (detail) {
            responseWrite({
              write,
              event: SseResponseEventEnum.toolCall,
              data: JSON.stringify({
                tool: {
                  id: toolCall.id,
                  toolName: toolModule.name,
                  toolAvatar: toolModule.avatar,
                  functionName: toolCall.function.name,
                  params: toolCall.function.arguments,
                  response: ''
                }
              })
            });
          }
        }
      }
      /* Append the arg fragment to the last tool call's parameters */
      const arg: string = responseChoice.tool_calls?.[0]?.function?.arguments || '';
      const currentTool = toolCalls[toolCalls.length - 1];
      if (currentTool) {
        currentTool.function.arguments += arg;

        if (detail) {
          responseWrite({
            write,
            event: SseResponseEventEnum.toolParams,
            data: JSON.stringify({
              tool: {
                id: currentTool.id,
                toolName: '',
                toolAvatar: '',
                params: arg,
                response: ''
              }
            })
          });
        }
      }
    }
  }

  if (!textAnswer && toolCalls.length === 0) {
    return Promise.reject('LLM api response empty');
  }

  return { answer: textAnswer, toolCalls };
}
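One way to picture the recursion in runToolWithToolChoice: each round appends the assistant's tool_calls message plus one tool message per executed call, then re-sends the whole transcript. A schematic of the message array after one round (all field values illustrative):

const nextRoundMessages = [
  { role: 'user', content: 'What is the weather in Paris?' },
  {
    role: 'assistant',
    tool_calls: [
      {
        id: 'call_1',
        type: 'function',
        function: { name: 'module-abc123', arguments: '{"city":"Paris"}' }
      }
    ]
  },
  { role: 'tool', tool_call_id: 'call_1', content: '{"temp":"18C"}' }
  // the next completion call sees all of the above and can answer or call more tools
];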
@@ -1,26 +0,0 @@
import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { FlowNodeInputItemType } from '@fastgpt/global/core/module/node/type';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type {
  ModuleDispatchProps,
  DispatchNodeResponseType
} from '@fastgpt/global/core/module/type.d';
import type { RunningModuleItemType } from '@fastgpt/global/core/module/runtime/type';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import type { DispatchFlowResponse } from '../../type.d';

export type DispatchToolModuleProps = ModuleDispatchProps<{
  [ModuleInputKeyEnum.history]?: ChatItemType[];
  [ModuleInputKeyEnum.aiModel]: string;
  [ModuleInputKeyEnum.aiSystemPrompt]: string;
  [ModuleInputKeyEnum.userChatInput]: string;
}>;

export type RunToolResponse = {
  dispatchFlowResponse: DispatchFlowResponse[];
  totalTokens: number;
  completeMessages?: ChatCompletionMessageParam[];
};
export type ToolModuleItemType = RunningModuleItemType & {
  toolParams: RunningModuleItemType['inputs'];
};
@@ -1,400 +0,0 @@
import type { NextApiResponse } from 'next';
import {
  filterGPTMessageByMaxTokens,
  formatGPTMessagesInRequestBefore,
  loadChatImgToBase64
} from '@fastgpt/service/core/chat/utils';
import type { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type.d';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import { getAIApi } from '@fastgpt/service/core/ai/config';
import type {
  ChatCompletion,
  ChatCompletionMessageParam,
  StreamChatType
} from '@fastgpt/global/core/ai/type.d';
import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/utils';
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { postTextCensor } from '@/service/common/censor';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import type { ModuleItemType } from '@fastgpt/global/core/module/type.d';
import type { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
import {
  countGptMessagesTokens,
  countMessagesTokens
} from '@fastgpt/global/common/string/tiktoken';
import {
  chats2GPTMessages,
  getSystemPrompt,
  GPTMessages2Chats,
  runtimePrompt2ChatsValue
} from '@fastgpt/global/core/chat/adapt';
import { Prompt_QuotePromptList, Prompt_QuoteTemplateList } from '@/global/core/prompt/AIChat';
import type { AIChatModuleProps } from '@fastgpt/global/core/module/node/type.d';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { responseWrite, responseWriteController } from '@fastgpt/service/common/response';
import { getLLMModel, ModelTypeEnum } from '@fastgpt/service/core/ai/model';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { getHistories } from '../utils';
import { filterSearchResultsByMaxChars } from '@fastgpt/global/core/dataset/search/utils';
import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';

export type ChatProps = ModuleDispatchProps<
  AIChatModuleProps & {
    [ModuleInputKeyEnum.userChatInput]: string;
    [ModuleInputKeyEnum.history]?: ChatItemType[] | number;
    [ModuleInputKeyEnum.aiChatDatasetQuote]?: SearchDataResponseItemType[];
  }
>;
export type ChatResponse = DispatchNodeResultType<{
  [ModuleOutputKeyEnum.answerText]: string;
  [ModuleOutputKeyEnum.history]: ChatItemType[];
}>;

/* request openai chat */
export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResponse> => {
  let {
    res,
    stream = false,
    detail = false,
    user,
    histories,
    module: { name, outputs },
    inputFiles = [],
    params: {
      model,
      temperature = 0,
      maxToken = 4000,
      history = 6,
      quoteQA = [],
      userChatInput,
      isResponseAnswerText = true,
      systemPrompt = '',
      quoteTemplate,
      quotePrompt
    }
  } = props;
  if (!userChatInput && inputFiles.length === 0) {
    return Promise.reject('Question is empty');
  }
  stream = stream && isResponseAnswerText;

  const chatHistories = getHistories(history, histories);

  const modelConstantsData = getLLMModel(model);

  if (!modelConstantsData) {
    return Promise.reject('The chat model is undefined, you need to select a chat model.');
  }

  const { quoteText } = filterQuote({
    quoteQA,
    model: modelConstantsData,
    quoteTemplate
  });

  // censor the model input when using the system key
  if (modelConstantsData.censor && !user.openaiAccount?.key) {
    await postTextCensor({
      text: `${systemPrompt}
      ${quoteText}
      ${userChatInput}
      `
    });
  }

  const { filterMessages } = getChatMessages({
    model: modelConstantsData,
    histories: chatHistories,
    quoteText,
    quotePrompt,
    userChatInput,
    inputFiles,
    systemPrompt
  });

  const { max_tokens } = await getMaxTokens({
    model: modelConstantsData,
    maxToken,
    filterMessages
  });

  // temperature adapt: FastGPT's UI range is 1~10, the model expects 0~maxTemperature
  temperature = +(modelConstantsData.maxTemperature * (temperature / 10)).toFixed(2);
  temperature = Math.max(temperature, 0.01);
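A worked example of the two temperature lines above, assuming the model's maxTemperature is 2.0:

const maxTemperature = 2.0; // assumed model limit
const uiTemperature = 7; // user-facing 1~10 scale

let t = +(maxTemperature * (uiTemperature / 10)).toFixed(2); // 1.4
t = Math.max(t, 0.01); // a UI value of 0 would otherwise map to exactly 0

console.log(t); // 1.4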
  const ai = getAIApi({
    userKey: user.openaiAccount,
    timeout: 480000
  });

  const concatMessages = [
    ...(modelConstantsData.defaultSystemChatPrompt
      ? [
          {
            role: ChatCompletionRequestMessageRoleEnum.System,
            content: modelConstantsData.defaultSystemChatPrompt
          }
        ]
      : []),
    ...formatGPTMessagesInRequestBefore(filterMessages)
  ] as ChatCompletionMessageParam[];

  if (concatMessages.length === 0) {
    return Promise.reject('core.chat.error.Messages empty');
  }

  const loadMessages = await Promise.all(
    concatMessages.map(async (item) => {
      if (item.role === ChatCompletionRequestMessageRoleEnum.User) {
        return {
          ...item,
          content: await loadChatImgToBase64(item.content)
        };
      } else {
        return item;
      }
    })
  );

  const response = await ai.chat.completions.create(
    {
      ...modelConstantsData?.defaultConfig,
      model: modelConstantsData.model,
      temperature,
      max_tokens,
      stream,
      messages: loadMessages
    },
    {
      headers: {
        Accept: 'application/json, text/plain, */*'
      }
    }
  );

  const { answerText } = await (async () => {
    if (stream) {
      // sse response
      const { answer } = await streamResponse({
        res,
        detail,
        stream: response
      });

      targetResponse({ res, detail, outputs });

      return {
        answerText: answer
      };
    } else {
      const unStreamResponse = response as ChatCompletion;
      const answer = unStreamResponse.choices?.[0]?.message?.content || '';

      return {
        answerText: answer
      };
    }
  })();

  const completeMessages = filterMessages.concat({
    role: ChatCompletionRequestMessageRoleEnum.Assistant,
    content: answerText
  });
  const chatCompleteMessages = GPTMessages2Chats(completeMessages);

  const tokens = countMessagesTokens(chatCompleteMessages);
  const { totalPoints, modelName } = formatModelChars2Points({
    model,
    tokens,
    modelType: ModelTypeEnum.llm
  });

  return {
    answerText,
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
      model: modelName,
      tokens,
      query: `${userChatInput}`,
      maxToken: max_tokens,
      historyPreview: getHistoryPreview(chatCompleteMessages),
      contextTotalLen: completeMessages.length
    },
    [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
      {
        moduleName: name,
        totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
        model: modelName,
        tokens
      }
    ],
    history: chatCompleteMessages
  };
};

function filterQuote({
  quoteQA = [],
  model,
  quoteTemplate
}: {
  quoteQA: ChatProps['params']['quoteQA'];
  model: LLMModelItemType;
  quoteTemplate?: string;
}) {
  function getValue(item: SearchDataResponseItemType, index: number) {
    return replaceVariable(quoteTemplate || Prompt_QuoteTemplateList[0].value, {
      q: item.q,
      a: item.a,
      source: item.sourceName,
      sourceId: String(item.sourceId || 'UnKnow'),
      index: index + 1
    });
  }

  // slice filterSearch
  const filterQuoteQA = filterSearchResultsByMaxChars(quoteQA, model.quoteMaxToken);

  const quoteText =
    filterQuoteQA.length > 0
      ? `${filterQuoteQA.map((item, index) => getValue(item, index).trim()).join('\n------\n')}`
      : '';

  return {
    filterQuoteQA: filterQuoteQA,
    quoteText
  };
}
function getChatMessages({
  quotePrompt,
  quoteText,
  histories = [],
  systemPrompt,
  userChatInput,
  inputFiles,
  model
}: {
  quotePrompt?: string;
  quoteText: string;
  histories: ChatItemType[];
  systemPrompt: string;
  userChatInput: string;
  inputFiles: UserChatItemValueItemType['file'][];
  model: LLMModelItemType;
}) {
  const replaceInputValue = quoteText
    ? replaceVariable(quotePrompt || Prompt_QuotePromptList[0].value, {
        quote: quoteText,
        question: userChatInput
      })
    : userChatInput;

  const messages: ChatItemType[] = [
    ...getSystemPrompt(systemPrompt),
    ...histories,
    {
      obj: ChatRoleEnum.Human,
      value: runtimePrompt2ChatsValue({
        files: inputFiles,
        text: replaceInputValue
      })
    }
  ];
  const adaptMessages = chats2GPTMessages({ messages, reserveId: false });

  const filterMessages = filterGPTMessageByMaxTokens({
    messages: adaptMessages,
    maxTokens: model.maxContext - 300 // reserve tokens for the request; the response maxToken is handled separately
  });

  return {
    filterMessages
  };
}
function getMaxTokens({
  maxToken,
  model,
  filterMessages = []
}: {
  maxToken: number;
  model: LLMModelItemType;
  filterMessages: ChatCompletionMessageParam[];
}) {
  maxToken = Math.min(maxToken, model.maxResponse);
  const tokensLimit = model.maxContext;

  /* count response max token */
  const promptsToken = countGptMessagesTokens(filterMessages);
  maxToken = promptsToken + maxToken > tokensLimit ? tokensLimit - promptsToken : maxToken;

  if (maxToken <= 0) {
    maxToken = 200;
  }
  return {
    max_tokens: maxToken
  };
}
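A worked example of the getMaxTokens clamp, with made-up numbers: the requested response budget shrinks when the prompt already fills most of the context window, and bottoms out at 200.

const maxContext = 8000; // model context window (assumed)
const maxResponse = 4000; // model response cap (assumed)
const promptsToken = 7000; // tokens already consumed by the prompt

let maxToken = Math.min(4000, maxResponse); // 4000 requested
maxToken = promptsToken + maxToken > maxContext ? maxContext - promptsToken : maxToken; // 1000
if (maxToken <= 0) maxToken = 200; // floor for pathological prompts

console.log(maxToken); // 1000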
function targetResponse({
  res,
  outputs,
  detail
}: {
  res: NextApiResponse;
  outputs: ModuleItemType['outputs'];
  detail: boolean;
}) {
  const targets =
    outputs.find((output) => output.key === ModuleOutputKeyEnum.answerText)?.targets || [];

  if (targets.length === 0) return;
  responseWrite({
    res,
    event: detail ? SseResponseEventEnum.answer : undefined,
    data: textAdaptGptResponse({
      text: '\n'
    })
  });
}

async function streamResponse({
  res,
  detail,
  stream
}: {
  res: NextApiResponse;
  detail: boolean;
  stream: StreamChatType;
}) {
  const write = responseWriteController({
    res,
    readStream: stream
  });
  let answer = '';
  for await (const part of stream) {
    if (res.closed) {
      stream.controller?.abort();
      break;
    }
    const content = part.choices?.[0]?.delta?.content || '';
    answer += content;

    responseWrite({
      write,
      event: detail ? SseResponseEventEnum.answer : undefined,
      data: textAdaptGptResponse({
        text: content
      })
    });
  }

  if (!answer) {
    return Promise.reject('core.chat.Chat API is error or undefined');
  }

  return { answer };
}
@@ -1,35 +0,0 @@
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { datasetSearchResultConcat } from '@fastgpt/global/core/dataset/search/utils';
import { filterSearchResultsByMaxChars } from '@fastgpt/global/core/dataset/search/utils';

type DatasetConcatProps = ModuleDispatchProps<
  {
    [ModuleInputKeyEnum.datasetMaxTokens]: number;
  } & { [key: string]: SearchDataResponseItemType[] }
>;
type DatasetConcatResponse = {
  [ModuleOutputKeyEnum.datasetQuoteQA]: SearchDataResponseItemType[];
};

export async function dispatchDatasetConcat(
  props: DatasetConcatProps
): Promise<DatasetConcatResponse> {
  const {
    params: { limit = 1500, ...quoteMap }
  } = props as DatasetConcatProps;

  const quoteList = Object.values(quoteMap).filter((list) => Array.isArray(list));

  const rrfConcatResults = datasetSearchResultConcat(
    quoteList.map((list) => ({
      k: 60,
      list
    }))
  );

  return {
    [ModuleOutputKeyEnum.datasetQuoteQA]: filterSearchResultsByMaxChars(rrfConcatResults, limit)
  };
}
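datasetSearchResultConcat merges the per-input result lists with reciprocal rank fusion; with k = 60, an item ranked r in a list contributes 1/(60 + r). A minimal standalone sketch of that scoring (the real helper also deduplicates by data id and keeps richer fields):

// Minimal RRF sketch; assumes each list is already ordered best-first.
function rrfMerge(lists: string[][], k = 60): string[] {
  const scores = new Map<string, number>();
  for (const list of lists) {
    list.forEach((id, index) => {
      const rank = index + 1;
      scores.set(id, (scores.get(id) || 0) + 1 / (k + rank));
    });
  }
  return [...scores.entries()].sort((a, b) => b[1] - a[1]).map(([id]) => id);
}

// 'b' ranks high in both lists, so it wins overall.
console.log(rrfMerge([['a', 'b', 'c'], ['b', 'd']])); // ['b', 'a', 'd', 'c']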
@@ -1,164 +0,0 @@
import {
  DispatchNodeResponseType,
  DispatchNodeResultType
} from '@fastgpt/global/core/module/runtime/type.d';
import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/utils';
import type { SelectedDatasetType } from '@fastgpt/global/core/module/api.d';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { ModelTypeEnum, getLLMModel, getVectorModel } from '@fastgpt/service/core/ai/model';
import { searchDatasetData } from '@/service/core/dataset/data/controller';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { DatasetSearchModeEnum } from '@fastgpt/global/core/dataset/constants';
import { getHistories } from '../utils';
import { datasetSearchQueryExtension } from '@fastgpt/service/core/dataset/search/utils';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { checkTeamReRankPermission } from '@fastgpt/service/support/permission/teamLimit';

type DatasetSearchProps = ModuleDispatchProps<{
  [ModuleInputKeyEnum.datasetSelectList]: SelectedDatasetType;
  [ModuleInputKeyEnum.datasetSimilarity]: number;
  [ModuleInputKeyEnum.datasetMaxTokens]: number;
  [ModuleInputKeyEnum.datasetSearchMode]: `${DatasetSearchModeEnum}`;
  [ModuleInputKeyEnum.userChatInput]: string;
  [ModuleInputKeyEnum.datasetSearchUsingReRank]: boolean;
  [ModuleInputKeyEnum.datasetSearchUsingExtensionQuery]: boolean;
  [ModuleInputKeyEnum.datasetSearchExtensionModel]: string;
  [ModuleInputKeyEnum.datasetSearchExtensionBg]: string;
}>;
export type DatasetSearchResponse = DispatchNodeResultType<{
  [ModuleOutputKeyEnum.datasetIsEmpty]?: boolean;
  [ModuleOutputKeyEnum.datasetUnEmpty]?: boolean;
  [ModuleOutputKeyEnum.datasetQuoteQA]: SearchDataResponseItemType[];
}>;

export async function dispatchDatasetSearch(
  props: DatasetSearchProps
): Promise<DatasetSearchResponse> {
  const {
    teamId,
    histories,
    module,
    params: {
      datasets = [],
      similarity,
      limit = 1500,
      usingReRank,
      searchMode,
      userChatInput,

      datasetSearchUsingExtensionQuery,
      datasetSearchExtensionModel,
      datasetSearchExtensionBg
    }
  } = props as DatasetSearchProps;

  if (!Array.isArray(datasets)) {
    return Promise.reject('Quote type error');
  }

  if (datasets.length === 0) {
    return Promise.reject('core.chat.error.Select dataset empty');
  }

  if (!userChatInput) {
    return Promise.reject('core.chat.error.User input empty');
  }

  // query extension
  const extensionModel =
    datasetSearchUsingExtensionQuery && datasetSearchExtensionModel
      ? getLLMModel(datasetSearchExtensionModel)
      : undefined;
  const { concatQueries, rewriteQuery, aiExtensionResult } = await datasetSearchQueryExtension({
    query: userChatInput,
    extensionModel,
    extensionBg: datasetSearchExtensionBg,
    histories: getHistories(6, histories)
  });

  // get vector model
  const vectorModel = getVectorModel(datasets[0]?.vectorModel?.model);

  // start search
  const {
    searchRes,
    tokens,
    usingSimilarityFilter,
    usingReRank: searchUsingReRank
  } = await searchDatasetData({
    teamId,
    reRankQuery: `${rewriteQuery}`,
    queries: concatQueries,
    model: vectorModel.model,
    similarity,
    limit,
    datasetIds: datasets.map((item) => item.datasetId),
    searchMode,
    usingReRank: usingReRank && (await checkTeamReRankPermission(teamId))
  });

  // count bill results
  // vector
  const { totalPoints, modelName } = formatModelChars2Points({
    model: vectorModel.model,
    tokens,
    modelType: ModelTypeEnum.vector
  });
  const responseData: DispatchNodeResponseType & { totalPoints: number } = {
    totalPoints,
    query: concatQueries.join('\n'),
    model: modelName,
    tokens,
    similarity: usingSimilarityFilter ? similarity : undefined,
    limit,
    searchMode,
    searchUsingReRank: searchUsingReRank,
    quoteList: searchRes
  };
  const nodeDispatchUsages: ChatNodeUsageType[] = [
    {
      totalPoints,
      moduleName: module.name,
      model: modelName,
      tokens
    }
  ];

  if (aiExtensionResult) {
    const { totalPoints, modelName } = formatModelChars2Points({
      model: aiExtensionResult.model,
      tokens: aiExtensionResult.tokens,
      modelType: ModelTypeEnum.llm
    });

    responseData.totalPoints += totalPoints;
    responseData.tokens = aiExtensionResult.tokens;
    responseData.extensionModel = modelName;
    responseData.extensionResult =
      aiExtensionResult.extensionQueries?.join('\n') ||
      JSON.stringify(aiExtensionResult.extensionQueries);

    nodeDispatchUsages.push({
      totalPoints,
      moduleName: 'core.module.template.Query extension',
      model: modelName,
      tokens: aiExtensionResult.tokens
    });
  }

  return {
    isEmpty: searchRes.length === 0 ? true : undefined,
    unEmpty: searchRes.length > 0 ? true : undefined,
    quoteQA: searchRes,
    [DispatchNodeResponseKeyEnum.nodeResponse]: responseData,
    nodeDispatchUsages,
    [DispatchNodeResponseKeyEnum.toolResponses]: searchRes.map((item) => ({
      text: `${item.q}\n${item.a}`.trim(),
      chunkIndex: item.chunkIndex
    }))
  };
}
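The paired isEmpty/unEmpty outputs exist so the flow editor can branch on either condition: exactly one of them is set, and the other stays undefined so its targets never fire. In miniature:

function emptyFlags(resultCount: number) {
  return {
    isEmpty: resultCount === 0 ? true : undefined, // triggers the "no quotes" branch
    unEmpty: resultCount > 0 ? true : undefined // triggers the "has quotes" branch
  };
}

console.log(emptyFlags(0)); // { isEmpty: true, unEmpty: undefined }
console.log(emptyFlags(3)); // { isEmpty: undefined, unEmpty: true }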
@@ -1,433 +0,0 @@
import { NextApiResponse } from 'next';
import { ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import type { ChatDispatchProps } from '@fastgpt/global/core/module/type.d';
import type { RunningModuleItemType } from '@fastgpt/global/core/module/runtime/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import type {
  AIChatItemValueItemType,
  ChatHistoryItemResType,
  ToolRunResponseItemType
} from '@fastgpt/global/core/chat/type.d';
import { FlowNodeInputTypeEnum, FlowNodeTypeEnum } from '@fastgpt/global/core/module/node/constant';
import { ModuleItemType } from '@fastgpt/global/core/module/type';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { responseWriteNodeStatus } from '@fastgpt/service/common/response';
import { getSystemTime } from '@fastgpt/global/common/time/timezone';

import { dispatchHistory } from './init/history';
import { dispatchChatInput } from './init/userChatInput';
import { dispatchChatCompletion } from './chat/oneapi';
import { dispatchDatasetSearch } from './dataset/search';
import { dispatchDatasetConcat } from './dataset/concat';
import { dispatchAnswer } from './tools/answer';
import { dispatchClassifyQuestion } from './agent/classifyQuestion';
import { dispatchContentExtract } from './agent/extract';
import { dispatchHttpRequest } from './tools/http';
import { dispatchHttp468Request } from './tools/http468';
import { dispatchAppRequest } from './tools/runApp';
import { dispatchQueryExtension } from './tools/queryExternsion';
import { dispatchRunPlugin } from './plugin/run';
import { dispatchPluginInput } from './plugin/runInput';
import { dispatchPluginOutput } from './plugin/runOutput';
import { checkTheModuleConnectedByTool, valueTypeFormat } from './utils';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { dispatchRunTools } from './agent/runTool/index';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import { DispatchFlowResponse } from './type';

const callbackMap: Record<`${FlowNodeTypeEnum}`, Function> = {
  [FlowNodeTypeEnum.historyNode]: dispatchHistory,
  [FlowNodeTypeEnum.questionInput]: dispatchChatInput,
  [FlowNodeTypeEnum.answerNode]: dispatchAnswer,
  [FlowNodeTypeEnum.chatNode]: dispatchChatCompletion,
  [FlowNodeTypeEnum.datasetSearchNode]: dispatchDatasetSearch,
  [FlowNodeTypeEnum.datasetConcatNode]: dispatchDatasetConcat,
  [FlowNodeTypeEnum.classifyQuestion]: dispatchClassifyQuestion,
  [FlowNodeTypeEnum.contentExtract]: dispatchContentExtract,
  [FlowNodeTypeEnum.httpRequest]: dispatchHttpRequest,
  [FlowNodeTypeEnum.httpRequest468]: dispatchHttp468Request,
  [FlowNodeTypeEnum.runApp]: dispatchAppRequest,
  [FlowNodeTypeEnum.pluginModule]: dispatchRunPlugin,
  [FlowNodeTypeEnum.pluginInput]: dispatchPluginInput,
  [FlowNodeTypeEnum.pluginOutput]: dispatchPluginOutput,
  [FlowNodeTypeEnum.queryExtension]: dispatchQueryExtension,
  [FlowNodeTypeEnum.tools]: dispatchRunTools,

  // none
  [FlowNodeTypeEnum.userGuide]: () => Promise.resolve()
};

/* running */
export async function dispatchWorkFlow({
  res,
  modules = [],
  runtimeModules,
  startParams = {},
  histories = [],
  variables = {},
  user,
  stream = false,
  detail = false,
  ...props
}: ChatDispatchProps & {
  modules?: ModuleItemType[]; // app modules
  runtimeModules?: RunningModuleItemType[];
  startParams?: Record<string, any>; // entry module params
}): Promise<DispatchFlowResponse> {
  // set sse response headers
  if (stream) {
    res.setHeader('Content-Type', 'text/event-stream;charset=utf-8');
    res.setHeader('Access-Control-Allow-Origin', '*');
    res.setHeader('X-Accel-Buffering', 'no');
    res.setHeader('Cache-Control', 'no-cache, no-transform');
  }

  variables = {
    ...getSystemVariable({ timezone: user.timezone }),
    ...variables
  };
  const runningModules = runtimeModules ? runtimeModules : loadModules(modules, variables);

  let chatResponses: ChatHistoryItemResType[] = []; // response request and save to database
  let chatAssistantResponse: AIChatItemValueItemType[] = []; // The value will be returned to the user
  let chatNodeUsages: ChatNodeUsageType[] = [];
  let toolRunResponse: ToolRunResponseItemType[] = [];
  let runningTime = Date.now();

  /* Store special response fields */
  function pushStore(
    { inputs = [] }: RunningModuleItemType,
    {
      answerText = '',
      responseData,
      nodeDispatchUsages,
      toolResponses,
      assistantResponses
    }: {
      [ModuleOutputKeyEnum.answerText]?: string;
      [DispatchNodeResponseKeyEnum.nodeResponse]?: ChatHistoryItemResType;
      [DispatchNodeResponseKeyEnum.nodeDispatchUsages]?: ChatNodeUsageType[];
      [DispatchNodeResponseKeyEnum.toolResponses]?: ToolRunResponseItemType;
      [DispatchNodeResponseKeyEnum.assistantResponses]?: AIChatItemValueItemType[]; // tool module, save the response value
    }
  ) {
    const time = Date.now();

    if (responseData) {
      chatResponses.push({
        ...responseData,
        runningTime: +((time - runningTime) / 1000).toFixed(2)
      });
    }
    if (nodeDispatchUsages) {
      chatNodeUsages = chatNodeUsages.concat(nodeDispatchUsages);
    }
    if (toolResponses) {
      if (Array.isArray(toolResponses) && toolResponses.length > 0) {
        toolRunResponse.push(toolResponses);
      } else if (Object.keys(toolResponses).length > 0) {
        toolRunResponse.push(toolResponses);
      }
    }
    if (assistantResponses) {
      chatAssistantResponse = chatAssistantResponse.concat(assistantResponses);
    }

    // save assistant text response
    if (answerText) {
      const isResponseAnswerText =
        inputs.find((item) => item.key === ModuleInputKeyEnum.aiChatIsResponseText)?.value ?? true;
      if (isResponseAnswerText) {
        chatAssistantResponse.push({
          type: ChatItemValueTypeEnum.text,
          text: {
            content: answerText
          }
        });
      }
    }

    runningTime = time;
  }
  /* Inject data into module input */
  function moduleInput(module: RunningModuleItemType, data: Record<string, any> = {}) {
    const updateInputValue = (key: string, value: any) => {
      const index = module.inputs.findIndex((item: any) => item.key === key);
      if (index === -1) return;
      module.inputs[index].value = value;
    };
    Object.entries(data).map(([key, val]: any) => {
      updateInputValue(key, val);
    });

    return;
  }
  /* Pass the output of the module to the next stage */
  function moduleOutput(
    module: RunningModuleItemType,
    result: Record<string, any> = {}
  ): Promise<any> {
    pushStore(module, result);

    const nextRunModules: RunningModuleItemType[] = [];

    // Assign the output value to the next module
    module.outputs.map((outputItem) => {
      if (result[outputItem.key] === undefined) return;
      /* update output value */
      outputItem.value = result[outputItem.key];

      /* update targets */
      outputItem.targets.map((target: any) => {
        // find module
        const targetModule = runningModules.find((item) => item.moduleId === target.moduleId);
        if (!targetModule) return;

        // push to running queue
        nextRunModules.push(targetModule);

        // update input
        moduleInput(targetModule, { [target.key]: outputItem.value });
      });
    });

    // Ensure the uniqueness of running modules
    const set = new Set<string>();
    const filterModules = nextRunModules.filter((module) => {
      if (set.has(module.moduleId)) return false;
      set.add(module.moduleId);
      return true;
    });

    return checkModulesCanRun(filterModules);
  }
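moduleOutput is the engine's only scheduling mechanism: writing a value onto an output pushes it into every connected target input, and a target runs once none of its inputs are undefined. A toy version of that propagation step (flat shapes, not the real RunningModuleItemType):

type ToyModule = {
  moduleId: string;
  inputs: Record<string, unknown>;
  targets: { moduleId: string; key: string }[];
};

function propagate(value: unknown, from: ToyModule, all: ToyModule[]): ToyModule[] {
  const ready: ToyModule[] = [];
  for (const target of from.targets) {
    const mod = all.find((m) => m.moduleId === target.moduleId);
    if (!mod) continue;
    mod.inputs[target.key] = value; // inject the value into the wired input
    if (Object.values(mod.inputs).every((v) => v !== undefined)) ready.push(mod); // runnable
  }
  return ready;
}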
  function checkModulesCanRun(modules: RunningModuleItemType[] = []) {
    return Promise.all(
      modules.map((module) => {
        if (!module.inputs.find((item: any) => item.value === undefined)) {
          // remove switch
          moduleInput(module, { [ModuleInputKeyEnum.switch]: undefined });
          return moduleRun(module);
        }
      })
    );
  }
  async function moduleRun(module: RunningModuleItemType): Promise<any> {
    if (res.closed) return Promise.resolve();

    if (stream && detail && module.showStatus) {
      responseStatus({
        res,
        name: module.name,
        status: 'running'
      });
    }

    // get module running params
    const params: Record<string, any> = {};
    module.inputs.forEach((item) => {
      params[item.key] = valueTypeFormat(item.value, item.valueType);
    });

    const dispatchData: ModuleDispatchProps<Record<string, any>> = {
      ...props,
      res,
      variables,
      histories,
      user,
      stream,
      detail,
      module,
      runtimeModules: runningModules,
      params
    };

    // run module
    const dispatchRes: Record<string, any> = await (async () => {
      if (callbackMap[module.flowType]) {
        return callbackMap[module.flowType](dispatchData);
      }
      return {};
    })();

    // format response data. Add module name and module type
    const formatResponseData: ChatHistoryItemResType | undefined = (() => {
      if (!dispatchRes[DispatchNodeResponseKeyEnum.nodeResponse]) return undefined;
      return {
        moduleName: module.name,
        moduleType: module.flowType,
        ...dispatchRes[DispatchNodeResponseKeyEnum.nodeResponse]
      };
    })();

    // Add output default value
    module.outputs.forEach((item) => {
      if (!item.required) return;
      if (dispatchRes[item.key] !== undefined) return;
      dispatchRes[item.key] = valueTypeFormat(item.defaultValue, item.valueType);
    });

    // Pass userChatInput
    const hasUserChatInputTarget = !!module.outputs.find(
      (item) => item.key === ModuleOutputKeyEnum.userChatInput
    )?.targets?.length;

    return moduleOutput(module, {
      [ModuleOutputKeyEnum.finish]: true,
      [ModuleOutputKeyEnum.userChatInput]: hasUserChatInputTarget
        ? params[ModuleOutputKeyEnum.userChatInput]
        : undefined,
      ...dispatchRes,
      [DispatchNodeResponseKeyEnum.nodeResponse]: formatResponseData,
      [DispatchNodeResponseKeyEnum.nodeDispatchUsages]:
        dispatchRes[DispatchNodeResponseKeyEnum.nodeDispatchUsages]
    });
  }
  // start the process with initInput
  const initModules = runningModules.filter((item) => item.isEntry);
  // reset entry
  modules.forEach((item) => {
    item.isEntry = false;
  });

  initModules.map((module) =>
    moduleInput(module, {
      ...startParams,
      history: [] // abandon history field. History module will get histories from other fields.
    })
  );
  await checkModulesCanRun(initModules);

  // finally, force-run pluginOutput even if some of its inputs never arrived
  const pluginOutputModule = runningModules.find(
    (item) => item.flowType === FlowNodeTypeEnum.pluginOutput
  );
  if (pluginOutputModule) {
    await moduleRun(pluginOutputModule);
  }

  return {
    flowResponses: chatResponses,
    flowUsages: chatNodeUsages,
    [DispatchNodeResponseKeyEnum.assistantResponses]:
      concatAssistantResponseAnswerText(chatAssistantResponse),
    [DispatchNodeResponseKeyEnum.toolResponses]: toolRunResponse
  };
}

/* init store modules to running modules */
function loadModules(
  modules: ModuleItemType[],
  variables: Record<string, any>
): RunningModuleItemType[] {
  return modules
    .filter((item) => {
      return ![FlowNodeTypeEnum.userGuide].includes(item.flowType as any);
    })
    .map<RunningModuleItemType>((module) => {
      return {
        moduleId: module.moduleId,
        name: module.name,
        avatar: module.avatar,
        intro: module.intro,
        flowType: module.flowType,
        showStatus: module.showStatus,
        isEntry: module.isEntry,
        inputs: module.inputs
          .filter(
            /*
              1. system inputs must be kept
              2. connected by a source handle
              3. manual input value or has a default value
              4. For modules connected as tools, keep the toolDescription inputs
            */
            (item) => {
              const isTool = checkTheModuleConnectedByTool(modules, module);

              if (isTool && item.toolDescription) {
                return true;
              }

              return (
                item.type === FlowNodeInputTypeEnum.systemInput ||
                item.connected ||
                item.value !== undefined
              );
            }
          ) // filter unconnected target inputs
          .map((item) => {
            const replace = ['string'].includes(typeof item.value);

            return {
              key: item.key,
              // variables replace
              value: replace ? replaceVariable(item.value, variables) : item.value,
              valueType: item.valueType,
              required: item.required,
              toolDescription: item.toolDescription
            };
          }),
        outputs: module.outputs
          .map((item) => ({
            key: item.key,
            required: item.required,
            defaultValue: item.defaultValue,
            answer: item.key === ModuleOutputKeyEnum.answerText,
            value: undefined,
            valueType: item.valueType,
            targets: item.targets
          }))
          .sort((a, b) => {
            // the finish output always comes last
            if (a.key === ModuleOutputKeyEnum.finish) return 1;
            if (b.key === ModuleOutputKeyEnum.finish) return -1;
            return 0;
          })
      };
    });
}

/* sse response module status */
export function responseStatus({
  res,
  status,
  name
}: {
  res: NextApiResponse;
  status?: 'running' | 'finish';
  name?: string;
}) {
  if (!name) return;
  responseWriteNodeStatus({
    res,
    name
  });
}

/* get system variables */
export function getSystemVariable({ timezone }: { timezone: string }) {
  return {
    cTime: getSystemTime(timezone)
  };
}

export const concatAssistantResponseAnswerText = (response: AIChatItemValueItemType[]) => {
  const result: AIChatItemValueItemType[] = [];
  // merge consecutive text items
  for (let i = 0; i < response.length; i++) {
    const item = response[i];
    if (item.type === ChatItemValueTypeEnum.text) {
      let text = item.text?.content || '';
      const lastItem = result[result.length - 1];
      if (lastItem && lastItem.type === ChatItemValueTypeEnum.text && lastItem.text?.content) {
        lastItem.text.content += text;
        continue;
      }
    }
    result.push(item);
  }

  return result;
};
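concatAssistantResponseAnswerText exists because streaming plus tool rounds can leave several adjacent text fragments in the assistant response; merging them keeps the saved chat record as one message. In effect:

// Simplified items: 'text' entries are chunks, 'tool' entries are tool events.
type Item = { type: 'text'; content: string } | { type: 'tool'; id: string };

function mergeText(items: Item[]): Item[] {
  const out: Item[] = [];
  for (const item of items) {
    const last = out[out.length - 1];
    if (item.type === 'text' && last?.type === 'text') {
      last.content += item.content; // fold into the previous text item
    } else {
      out.push(item);
    }
  }
  return out;
}

console.log(
  mergeText([
    { type: 'text', content: 'Hel' },
    { type: 'text', content: 'lo' },
    { type: 'tool', id: 'call_1' }
  ])
); // [{ type: 'text', content: 'Hello' }, { type: 'tool', id: 'call_1' }]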
@@ -1,19 +0,0 @@
import { ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { getHistories } from '../utils';
export type HistoryProps = ModuleDispatchProps<{
  maxContext?: number;
  [ModuleInputKeyEnum.history]: ChatItemType[];
}>;

export const dispatchHistory = (props: Record<string, any>) => {
  const {
    histories,
    params: { maxContext }
  } = props as HistoryProps;

  return {
    history: getHistories(maxContext, histories)
  };
};
@@ -1,14 +0,0 @@
import { ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
export type UserChatInputProps = ModuleDispatchProps<{
  [ModuleInputKeyEnum.userChatInput]: string;
}>;

export const dispatchChatInput = (props: Record<string, any>) => {
  const {
    params: { userChatInput }
  } = props as UserChatInputProps;
  return {
    userChatInput
  };
};
@@ -1,99 +0,0 @@
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { dispatchWorkFlow } from '../index';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/module/node/constant';
import { DYNAMIC_INPUT_KEY, ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { getPluginRuntimeById } from '@fastgpt/service/core/plugin/controller';
import { authPluginCanUse } from '@fastgpt/service/support/permission/auth/plugin';
import { setEntryEntries } from '../utils';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';

type RunPluginProps = ModuleDispatchProps<{
  [ModuleInputKeyEnum.pluginId]: string;
  [key: string]: any;
}>;
type RunPluginResponse = DispatchNodeResultType<{}>;

export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPluginResponse> => {
  const {
    mode,
    teamId,
    tmbId,
    module,
    params: { pluginId, ...data }
  } = props;

  if (!pluginId) {
    return Promise.reject('pluginId not found');
  }

  await authPluginCanUse({ id: pluginId, teamId, tmbId });
  const plugin = await getPluginRuntimeById(pluginId);

  // concat dynamic inputs
  const inputModule = plugin.modules.find((item) => item.flowType === FlowNodeTypeEnum.pluginInput);
  if (!inputModule) return Promise.reject('Plugin error: no input module is set.');
  const hasDynamicInput = inputModule.inputs.find((input) => input.key === DYNAMIC_INPUT_KEY);

  const startParams: Record<string, any> = (() => {
    if (!hasDynamicInput) return data;

    const params: Record<string, any> = {
      [DYNAMIC_INPUT_KEY]: {}
    };

    for (const key in data) {
      const input = inputModule.inputs.find((input) => input.key === key);
      if (input) {
        params[key] = data[key];
      } else {
        params[DYNAMIC_INPUT_KEY][key] = data[key];
      }
    }

    return params;
  })();
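The IIFE above sorts caller params into two buckets: keys the plugin declares stay top-level, everything else is folded under DYNAMIC_INPUT_KEY. A standalone sketch of that packing (the key name below is an assumed placeholder for the real constant's value, and the fields are illustrative):

const DYNAMIC_INPUT_KEY = 'pluginStart'; // assumed value, stands in for the shared constant
const declared = new Set(['city']); // keys the plugin's input module declares
const data: Record<string, unknown> = { city: 'Paris', units: 'metric' };

const packed: Record<string, any> = { [DYNAMIC_INPUT_KEY]: {} };
for (const key in data) {
  if (declared.has(key)) packed[key] = data[key];
  else packed[DYNAMIC_INPUT_KEY][key] = data[key]; // undeclared keys become dynamic inputs
}

console.log(packed); // { pluginStart: { units: 'metric' }, city: 'Paris' }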
  const { flowResponses, flowUsages, assistantResponses } = await dispatchWorkFlow({
    ...props,
    modules: setEntryEntries(plugin.modules).map((module) => ({
      ...module,
      showStatus: false
    })),
    runtimeModules: undefined, // must reset
    startParams
  });

  const output = flowResponses.find((item) => item.moduleType === FlowNodeTypeEnum.pluginOutput);

  if (output) {
    output.moduleLogo = plugin.avatar;
  }

  return {
    assistantResponses,
    // responseData, // debug
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      moduleLogo: plugin.avatar,
      totalPoints: flowResponses.reduce((sum, item) => sum + (item.totalPoints || 0), 0),
      pluginOutput: output?.pluginOutput,
      pluginDetail:
        mode === 'test' && plugin.teamId === teamId
          ? flowResponses.filter((item) => {
              const filterArr = [FlowNodeTypeEnum.pluginOutput];
              return !filterArr.includes(item.moduleType as any);
            })
          : undefined
    },
    [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
      {
        moduleName: plugin.name,
        totalPoints: flowUsages.reduce((sum, item) => sum + (item.totalPoints || 0), 0),
        model: plugin.name,
        tokens: 0
      }
    ],
    [DispatchNodeResponseKeyEnum.toolResponses]: output?.pluginOutput ? output.pluginOutput : {},
    ...(output ? output.pluginOutput : {})
  };
};
@@ -1,11 +0,0 @@
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';

export type PluginInputProps = ModuleDispatchProps<{
  [key: string]: any;
}>;

export const dispatchPluginInput = (props: PluginInputProps) => {
  const { params } = props;

  return params;
};
@@ -1,19 +0,0 @@
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type.d';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';

export type PluginOutputProps = ModuleDispatchProps<{
  [key: string]: any;
}>;
export type PluginOutputResponse = DispatchNodeResultType<{}>;

export const dispatchPluginOutput = (props: PluginOutputProps): PluginOutputResponse => {
  const { params } = props;

  return {
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      totalPoints: 0,
      pluginOutput: params
    }
  };
};
@@ -1,36 +0,0 @@
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { responseWrite } from '@fastgpt/service/common/response';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
export type AnswerProps = ModuleDispatchProps<{
  text: string;
}>;
export type AnswerResponse = {
  [ModuleOutputKeyEnum.answerText]: string;
};

export const dispatchAnswer = (props: Record<string, any>): AnswerResponse => {
  const {
    res,
    detail,
    stream,
    params: { text = '' }
  } = props as AnswerProps;

  const formatText = typeof text === 'string' ? text : JSON.stringify(text, null, 2);

  if (stream) {
    responseWrite({
      res,
      event: detail ? SseResponseEventEnum.fastAnswer : undefined,
      data: textAdaptGptResponse({
        text: `\n${formatText}`
      })
    });
  }

  return {
    [ModuleOutputKeyEnum.answerText]: formatText
  };
};
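
// Illustrative sketch, not part of the diff: the text normalization used by
// dispatchAnswer above — non-string params are pretty-printed as JSON before
// being streamed or returned.
function formatAnswerText(text: unknown): string {
  return typeof text === 'string' ? text : JSON.stringify(text, null, 2);
}

// formatAnswerText('hello')  => 'hello'
// formatAnswerText({ a: 1 }) => '{\n  "a": 1\n}'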
@@ -1,251 +0,0 @@
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import {
  DYNAMIC_INPUT_KEY,
  ModuleInputKeyEnum,
  ModuleOutputKeyEnum
} from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import axios from 'axios';
import { valueTypeFormat } from '../utils';
import { SERVICE_LOCAL_HOST } from '@fastgpt/service/common/system/tools';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';

type HttpRequestProps = ModuleDispatchProps<{
  [ModuleInputKeyEnum.abandon_httpUrl]: string;
  [ModuleInputKeyEnum.httpMethod]: string;
  [ModuleInputKeyEnum.httpReqUrl]: string;
  [ModuleInputKeyEnum.httpHeaders]: string;
  [key: string]: any;
}>;
type HttpResponse = DispatchNodeResultType<{
  [ModuleOutputKeyEnum.failed]?: boolean;
  [key: string]: any;
}>;

// Lift dynamic inputs to the top level and drop the container key
const flatDynamicParams = (params: Record<string, any>) => {
  const dynamicParams = params[DYNAMIC_INPUT_KEY];
  if (!dynamicParams) return params;
  return {
    ...params,
    ...dynamicParams,
    [DYNAMIC_INPUT_KEY]: undefined
  };
};

export const dispatchHttpRequest = async (props: HttpRequestProps): Promise<HttpResponse> => {
  let {
    appId,
    chatId,
    responseChatItemId,
    variables,
    module: { outputs },
    params: {
      system_httpMethod: httpMethod = 'POST',
      system_httpReqUrl: httpReqUrl,
      system_httpHeader: httpHeader,
      ...body
    }
  } = props;

  if (!httpReqUrl) {
    return Promise.reject('Http url is empty');
  }

  body = flatDynamicParams(body);

  const requestBody = {
    appId,
    chatId,
    responseChatItemId,
    variables,
    data: body
  };
  const requestQuery = {
    appId,
    chatId,
    ...variables,
    ...body
  };

  const formatBody = transformFlatJson({ ...requestBody });

  // parse header
  const headers = await (() => {
    try {
      if (!httpHeader) return {};
      return JSON.parse(httpHeader);
    } catch (error) {
      return Promise.reject('Header is invalid JSON');
    }
  })();

  try {
    const response = await fetchData({
      method: httpMethod,
      url: httpReqUrl,
      headers,
      body: formatBody,
      query: requestQuery
    });

    // format output value type
    const results: Record<string, any> = {};
    for (const key in response) {
      const output = outputs.find((item) => item.key === key);
      if (!output) continue;
      results[key] = valueTypeFormat(response[key], output.valueType);
    }

    return {
      [DispatchNodeResponseKeyEnum.nodeResponse]: {
        totalPoints: 0,
        body: formatBody,
        httpResult: response
      },
      ...results
    };
  } catch (error) {
    console.log(error);

    return {
      [ModuleOutputKeyEnum.failed]: true,
      [DispatchNodeResponseKeyEnum.nodeResponse]: {
        totalPoints: 0,
        body: formatBody,
        httpResult: { error }
      }
    };
  }
};

async function fetchData({
  method,
  url,
  headers,
  body,
  query
}: {
  method: string;
  url: string;
  headers: Record<string, any>;
  body: Record<string, any>;
  query: Record<string, any>;
}): Promise<Record<string, any>> {
  const { data: response } = await axios<Record<string, any>>({
    method,
    baseURL: `http://${SERVICE_LOCAL_HOST}`,
    url,
    headers: {
      'Content-Type': 'application/json',
      ...headers
    },
    timeout: 360000,
    params: method === 'GET' ? query : {},
    data: method === 'POST' ? body : {}
  });

  /*
    parse the json:
    {
      user: {
        name: 'xxx',
        age: 12
      },
      list: [
        {
          name: 'xxx',
          age: 50
        },
        [{ test: 22 }]
      ],
      psw: 'xxx'
    }

    result: {
      'user': { name: 'xxx', age: 12 },
      'user.name': 'xxx',
      'user.age': 12,
      'list': [ { name: 'xxx', age: 50 }, [ [Object] ] ],
      'list[0]': { name: 'xxx', age: 50 },
      'list[0].name': 'xxx',
      'list[0].age': 50,
      'list[1]': [ { test: 22 } ],
      'list[1][0]': { test: 22 },
      'list[1][0].test': 22,
      'psw': 'xxx'
    }
  */
  const parseJson = (obj: Record<string, any>, prefix = '') => {
    let result: Record<string, any> = {};

    if (Array.isArray(obj)) {
      for (let i = 0; i < obj.length; i++) {
        result[`${prefix}[${i}]`] = obj[i];

        if (Array.isArray(obj[i])) {
          result = {
            ...result,
            ...parseJson(obj[i], `${prefix}[${i}]`)
          };
        } else if (typeof obj[i] === 'object') {
          result = {
            ...result,
            ...parseJson(obj[i], `${prefix}[${i}].`)
          };
        }
      }
    } else if (typeof obj === 'object') {
      for (const key in obj) {
        result[`${prefix}${key}`] = obj[key];

        if (Array.isArray(obj[key])) {
          result = {
            ...result,
            ...parseJson(obj[key], `${prefix}${key}`)
          };
        } else if (typeof obj[key] === 'object') {
          result = {
            ...result,
            ...parseJson(obj[key], `${prefix}${key}.`)
          };
        }
      }
    }

    return result;
  };

  return parseJson(response);
}

function transformFlatJson(obj: Record<string, any>) {
  for (let key in obj) {
    if (typeof obj[key] === 'object') {
      transformFlatJson(obj[key]);
    }
    if (key.includes('.')) {
      let parts = key.split('.');
      if (parts.length <= 1) continue;

      const firstKey = parts.shift();

      if (!firstKey) continue;

      const lastKey = parts.join('.');

      if (obj[firstKey]) {
        obj[firstKey] = {
          ...obj[firstKey],
          [lastKey]: obj[key]
        };
      } else {
        obj[firstKey] = { [lastKey]: obj[key] };
      }

      transformFlatJson(obj[firstKey]);

      delete obj[key];
    }
  }
  return obj;
}
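
// Illustrative sketch, not part of the diff: transformFlatJson above nests
// dotted keys in the outgoing body — the inverse of the parseJson flattening
// applied to the response. Demo values are assumptions.
const flatBody: Record<string, any> = { 'user.name': 'xxx', 'user.age': 12 };
// transformFlatJson(flatBody) mutates it into:
// { user: { name: 'xxx', age: 12 } }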
@@ -1,293 +0,0 @@
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import {
  DYNAMIC_INPUT_KEY,
  ModuleInputKeyEnum,
  ModuleOutputKeyEnum
} from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import axios from 'axios';
import { valueTypeFormat } from '../utils';
import { SERVICE_LOCAL_HOST } from '@fastgpt/service/common/system/tools';
import { addLog } from '@fastgpt/service/common/system/log';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';

type PropsArrType = {
  key: string;
  type: string;
  value: string;
};
type HttpRequestProps = ModuleDispatchProps<{
  [ModuleInputKeyEnum.abandon_httpUrl]: string;
  [ModuleInputKeyEnum.httpMethod]: string;
  [ModuleInputKeyEnum.httpReqUrl]: string;
  [ModuleInputKeyEnum.httpHeaders]: PropsArrType[];
  [ModuleInputKeyEnum.httpParams]: PropsArrType[];
  [ModuleInputKeyEnum.httpJsonBody]: string;
  [DYNAMIC_INPUT_KEY]: Record<string, any>;
  [key: string]: any;
}>;
type HttpResponse = DispatchNodeResultType<{
  [ModuleOutputKeyEnum.failed]?: boolean;
  [key: string]: any;
}>;

const UNDEFINED_SIGN = 'UNDEFINED_SIGN';

export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<HttpResponse> => {
  let {
    appId,
    chatId,
    responseChatItemId,
    variables,
    module: { moduleId, outputs },
    histories,
    params: {
      system_httpMethod: httpMethod = 'POST',
      system_httpReqUrl: httpReqUrl,
      system_httpHeader: httpHeader,
      system_httpParams: httpParams = [],
      system_httpJsonBody: httpJsonBody,
      [DYNAMIC_INPUT_KEY]: dynamicInput,
      ...body
    }
  } = props;

  if (!httpReqUrl) {
    return Promise.reject('Http url is empty');
  }

  const concatVariables = {
    appId,
    chatId,
    responseChatItemId,
    ...variables,
    histories: histories.slice(0, 10),
    ...body
  };

  // parse header
  const headers = await (() => {
    try {
      if (!httpHeader || httpHeader.length === 0) return {};
      // httpHeader is an array of key/value items
      return httpHeader.reduce((acc: Record<string, string>, item) => {
        const key = replaceVariable(item.key, concatVariables);
        const value = replaceVariable(item.value, concatVariables);
        acc[key] = valueTypeFormat(value, 'string');
        return acc;
      }, {});
    } catch (error) {
      return Promise.reject('Header is invalid JSON');
    }
  })();
  const params = httpParams.reduce((acc: Record<string, string>, item) => {
    const key = replaceVariable(item.key, concatVariables);
    const value = replaceVariable(item.value, concatVariables);
    acc[key] = valueTypeFormat(value, 'string');
    return acc;
  }, {});
  const requestBody = await (() => {
    if (!httpJsonBody) return { [DYNAMIC_INPUT_KEY]: dynamicInput };
    httpJsonBody = replaceVariable(httpJsonBody, concatVariables);
    try {
      const jsonParse = JSON.parse(httpJsonBody);
      const removeSignJson = removeUndefinedSign(jsonParse);
      return { [DYNAMIC_INPUT_KEY]: dynamicInput, ...removeSignJson };
    } catch (error) {
      console.log(error);
      return Promise.reject(`Invalid JSON body: ${httpJsonBody}`);
    }
  })();
  // console.log(params, requestBody, headers, concatVariables);

  try {
    const { formatResponse, rawResponse } = await fetchData({
      method: httpMethod,
      url: httpReqUrl,
      headers,
      body: requestBody,
      params
    });

    // format output value type
    const results: Record<string, any> = {};
    for (const key in formatResponse) {
      const output = outputs.find((item) => item.key === key);
      if (!output) continue;
      results[key] = valueTypeFormat(formatResponse[key], output.valueType);
    }

    return {
      [DispatchNodeResponseKeyEnum.nodeResponse]: {
        totalPoints: 0,
        params: Object.keys(params).length > 0 ? params : undefined,
        body: Object.keys(requestBody).length > 0 ? requestBody : undefined,
        headers: Object.keys(headers).length > 0 ? headers : undefined,
        httpResult: rawResponse
      },
      [DispatchNodeResponseKeyEnum.toolResponses]: results,
      [ModuleOutputKeyEnum.httpRawResponse]: rawResponse,
      ...results
    };
  } catch (error) {
    addLog.error('Http request error', error);
    return {
      [ModuleOutputKeyEnum.failed]: true,
      [DispatchNodeResponseKeyEnum.nodeResponse]: {
        totalPoints: 0,
        params: Object.keys(params).length > 0 ? params : undefined,
        body: Object.keys(requestBody).length > 0 ? requestBody : undefined,
        headers: Object.keys(headers).length > 0 ? headers : undefined,
        httpResult: { error: formatHttpError(error) }
      }
    };
  }
};

async function fetchData({
  method,
  url,
  headers,
  body,
  params
}: {
  method: string;
  url: string;
  headers: Record<string, any>;
  body: Record<string, any>;
  params: Record<string, any>;
}): Promise<Record<string, any>> {
  const { data: response } = await axios<Record<string, any>>({
    method,
    baseURL: `http://${SERVICE_LOCAL_HOST}`,
    url,
    headers: {
      'Content-Type': 'application/json',
      ...headers
    },
    params,
    data: method === 'POST' ? body : {}
  });

  /*
    parse the json:
    {
      user: {
        name: 'xxx',
        age: 12
      },
      list: [
        {
          name: 'xxx',
          age: 50
        },
        [{ test: 22 }]
      ],
      psw: 'xxx'
    }

    result: {
      'user': { name: 'xxx', age: 12 },
      'user.name': 'xxx',
      'user.age': 12,
      'list': [ { name: 'xxx', age: 50 }, [ [Object] ] ],
      'list[0]': { name: 'xxx', age: 50 },
      'list[0].name': 'xxx',
      'list[0].age': 50,
      'list[1]': [ { test: 22 } ],
      'list[1][0]': { test: 22 },
      'list[1][0].test': 22,
      'psw': 'xxx'
    }
  */
  const parseJson = (obj: Record<string, any>, prefix = '') => {
    let result: Record<string, any> = {};

    if (Array.isArray(obj)) {
      for (let i = 0; i < obj.length; i++) {
        result[`${prefix}[${i}]`] = obj[i];

        if (Array.isArray(obj[i])) {
          result = {
            ...result,
            ...parseJson(obj[i], `${prefix}[${i}]`)
          };
        } else if (typeof obj[i] === 'object') {
          result = {
            ...result,
            ...parseJson(obj[i], `${prefix}[${i}].`)
          };
        }
      }
    } else if (typeof obj === 'object') {
      for (const key in obj) {
        result[`${prefix}${key}`] = obj[key];

        if (Array.isArray(obj[key])) {
          result = {
            ...result,
            ...parseJson(obj[key], `${prefix}${key}`)
          };
        } else if (typeof obj[key] === 'object') {
          result = {
            ...result,
            ...parseJson(obj[key], `${prefix}${key}.`)
          };
        }
      }
    }

    return result;
  };

  return {
    formatResponse: parseJson(response),
    rawResponse: response
  };
}

function replaceVariable(text: string, obj: Record<string, any>) {
  for (const [key, value] of Object.entries(obj)) {
    if (value === undefined) {
      text = text.replace(new RegExp(`{{${key}}}`, 'g'), UNDEFINED_SIGN);
    } else {
      const replacement = JSON.stringify(value);
      const unquotedReplacement =
        replacement.startsWith('"') && replacement.endsWith('"')
          ? replacement.slice(1, -1)
          : replacement;
      text = text.replace(new RegExp(`{{${key}}}`, 'g'), unquotedReplacement);
    }
  }
  return text || '';
}
function removeUndefinedSign(obj: Record<string, any>) {
  for (const key in obj) {
    if (obj[key] === UNDEFINED_SIGN) {
      obj[key] = undefined;
    } else if (Array.isArray(obj[key])) {
      obj[key] = obj[key].map((item: any) => {
        if (item === UNDEFINED_SIGN) {
          return undefined;
        } else if (typeof item === 'object') {
          removeUndefinedSign(item);
        }
        return item;
      });
    } else if (typeof obj[key] === 'object') {
      removeUndefinedSign(obj[key]);
    }
  }
  return obj;
}
function formatHttpError(error: any) {
  return {
    message: error?.message,
    name: error?.name,
    method: error?.config?.method,
    baseURL: error?.config?.baseURL,
    url: error?.config?.url,
    code: error?.code,
    status: error?.status
  };
}
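
// Illustrative sketch, not part of the diff: the {{variable}} templating
// pipeline above. Undefined variables are first substituted with
// UNDEFINED_SIGN so that JSON.parse still succeeds, then removeUndefinedSign
// turns the markers back into real undefined values. Demo values are
// assumptions.
const demoTemplate = '{"q": "{{query}}", "opt": "{{missing}}"}';
// replaceVariable(demoTemplate, { query: 'hi', missing: undefined })
//   => '{"q": "hi", "opt": "UNDEFINED_SIGN"}'
// JSON.parse(...)          => { q: 'hi', opt: 'UNDEFINED_SIGN' }
// removeUndefinedSign(...) => { q: 'hi', opt: undefined }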
@@ -1,76 +0,0 @@
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { ModelTypeEnum, getLLMModel } from '@fastgpt/service/core/ai/model';
import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/utils';
import { queryExtension } from '@fastgpt/service/core/ai/functions/queryExtension';
import { getHistories } from '../utils';
import { hashStr } from '@fastgpt/global/common/string/tools';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';

type Props = ModuleDispatchProps<{
  [ModuleInputKeyEnum.aiModel]: string;
  [ModuleInputKeyEnum.aiSystemPrompt]?: string;
  [ModuleInputKeyEnum.history]?: ChatItemType[] | number;
  [ModuleInputKeyEnum.userChatInput]: string;
}>;
type Response = DispatchNodeResultType<{
  [ModuleOutputKeyEnum.text]: string;
}>;

export const dispatchQueryExtension = async ({
  histories,
  module,
  params: { model, systemPrompt, history, userChatInput }
}: Props): Promise<Response> => {
  if (!userChatInput) {
    return Promise.reject('Question is empty');
  }

  const queryExtensionModel = getLLMModel(model);
  const chatHistories = getHistories(history, histories);

  const { extensionQueries, tokens } = await queryExtension({
    chatBg: systemPrompt,
    query: userChatInput,
    histories: chatHistories,
    model: queryExtensionModel.model
  });

  extensionQueries.unshift(userChatInput);

  const { totalPoints, modelName } = formatModelChars2Points({
    model: queryExtensionModel.model,
    tokens,
    modelType: ModelTypeEnum.llm
  });

  const set = new Set<string>();
  const filterSameQueries = extensionQueries.filter((item) => {
    // Strip punctuation, whitespace, etc.; compare only the text content
    const str = hashStr(item.replace(/[^\p{L}\p{N}]/gu, ''));
    if (set.has(str)) return false;
    set.add(str);
    return true;
  });

  return {
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      totalPoints,
      model: modelName,
      tokens,
      query: userChatInput,
      textOutput: JSON.stringify(filterSameQueries)
    },
    [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
      {
        moduleName: module.name,
        totalPoints,
        model: modelName,
        tokens
      }
    ],
    [ModuleOutputKeyEnum.text]: JSON.stringify(filterSameQueries)
  };
};
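
// Illustrative sketch, not part of the diff: the deduplication rule used by
// filterSameQueries above. Queries are compared after dropping every
// character that is not a letter or digit (in any script), so variants that
// differ only in punctuation or spacing collapse into one entry.
const normalizeQuery = (s: string) => s.replace(/[^\p{L}\p{N}]/gu, '');

// normalizeQuery('What is FastGPT?')  => 'WhatisFastGPT'
// normalizeQuery('What is FastGPT!!') => 'WhatisFastGPT'  // duplicate, filtered out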
@@ -1,107 +0,0 @@
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { SelectAppItemType } from '@fastgpt/global/core/module/type';
import { dispatchWorkFlow } from '../index';
import { MongoApp } from '@fastgpt/service/core/app/schema';
import { responseWrite } from '@fastgpt/service/common/response';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { getHistories, setEntryEntries } from '../utils';
import { chatValue2RuntimePrompt, runtimePrompt2ChatsValue } from '@fastgpt/global/core/chat/adapt';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';

type Props = ModuleDispatchProps<{
  [ModuleInputKeyEnum.userChatInput]: string;
  [ModuleInputKeyEnum.history]?: ChatItemType[] | number;
  app: SelectAppItemType;
}>;
type Response = DispatchNodeResultType<{
  [ModuleOutputKeyEnum.answerText]: string;
  [ModuleOutputKeyEnum.history]: ChatItemType[];
}>;

export const dispatchAppRequest = async (props: Props): Promise<Response> => {
  const {
    res,
    teamId,
    stream,
    detail,
    histories,
    inputFiles,
    params: { userChatInput, history, app }
  } = props;
  let start = Date.now();

  if (!userChatInput) {
    return Promise.reject('Input is empty');
  }

  const appData = await MongoApp.findOne({
    _id: app.id,
    teamId
  });

  if (!appData) {
    return Promise.reject('App not found');
  }

  if (stream) {
    responseWrite({
      res,
      event: detail ? SseResponseEventEnum.answer : undefined,
      data: textAdaptGptResponse({
        text: '\n'
      })
    });
  }

  const chatHistories = getHistories(history, histories);

  const { flowResponses, flowUsages, assistantResponses } = await dispatchWorkFlow({
    ...props,
    appId: app.id,
    modules: setEntryEntries(appData.modules),
    runtimeModules: undefined, // must reset
    histories: chatHistories,
    inputFiles,
    startParams: {
      userChatInput
    }
  });

  const completeMessages = chatHistories.concat([
    {
      obj: ChatRoleEnum.Human,
      value: runtimePrompt2ChatsValue({
        files: inputFiles,
        text: userChatInput
      })
    },
    {
      obj: ChatRoleEnum.AI,
      value: assistantResponses
    }
  ]);

  const { text } = chatValue2RuntimePrompt(assistantResponses);

  return {
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      moduleLogo: appData.avatar,
      query: userChatInput,
      textOutput: text,
      totalPoints: flowResponses.reduce((sum, item) => sum + (item.totalPoints || 0), 0)
    },
    [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
      {
        moduleName: appData.name,
        totalPoints: flowUsages.reduce((sum, item) => sum + (item.totalPoints || 0), 0)
      }
    ],
    answerText: text,
    history: completeMessages
  };
};
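
// Illustrative sketch, not part of the diff: the shape of the history that
// dispatchAppRequest above returns — the prior history plus one new
// Human/AI turn built from the sub-app run.
// history = [...chatHistories,
//   { obj: ChatRoleEnum.Human, value: runtimePrompt2ChatsValue({ files, text }) },
//   { obj: ChatRoleEnum.AI,    value: assistantResponses }
// ];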
@@ -1,16 +0,0 @@
import {
  AIChatItemValueItemType,
  ChatHistoryItemResType,
  ChatItemValueItemType,
  ToolRunResponseItemType
} from '@fastgpt/global/core/chat/type';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';

export type DispatchFlowResponse = {
  flowResponses: ChatHistoryItemResType[];
  flowUsages: ChatNodeUsageType[];
  // [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: ChatNodeUsageType[];
  [DispatchNodeResponseKeyEnum.toolResponses]: ToolRunResponseItemType[];
  [DispatchNodeResponseKeyEnum.assistantResponses]: AIChatItemValueItemType[];
};
@@ -1,62 +0,0 @@
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import { ModuleIOValueTypeEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/module/node/constant';
import { ModuleItemType } from '@fastgpt/global/core/module/type.d';

export const setEntryEntries = (modules: ModuleItemType[]) => {
  const initRunningModuleType: Record<string, boolean> = {
    [FlowNodeTypeEnum.historyNode]: true,
    [FlowNodeTypeEnum.questionInput]: true,
    [FlowNodeTypeEnum.pluginInput]: true
  };

  modules.forEach((item) => {
    if (initRunningModuleType[item.flowType]) {
      item.isEntry = true;
    }
  });
  return modules;
};

export const checkTheModuleConnectedByTool = (
  modules: ModuleItemType[],
  module: ModuleItemType
) => {
  let sign = false;
  const toolModules = modules.filter((item) => item.flowType === FlowNodeTypeEnum.tools);

  toolModules.forEach((item) => {
    const toolOutput = item.outputs.find(
      (output) => output.key === ModuleOutputKeyEnum.selectedTools
    );
    toolOutput?.targets.forEach((target) => {
      if (target.moduleId === module.moduleId) {
        sign = true;
      }
    });
  });

  return sign;
};

export const getHistories = (history?: ChatItemType[] | number, histories: ChatItemType[] = []) => {
  if (!history) return [];
  if (typeof history === 'number') return histories.slice(-history);
  if (Array.isArray(history)) return history;

  return [];
};

/* value type format */
export const valueTypeFormat = (value: any, type?: `${ModuleIOValueTypeEnum}`) => {
  if (value === undefined) return;

  if (type === 'string') {
    if (typeof value !== 'object') return String(value);
    return JSON.stringify(value);
  }
  if (type === 'number') return Number(value);
  if (type === 'boolean') return Boolean(value);

  return value;
};
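
// Illustrative sketch, not part of the diff: expected behaviour of the two
// helpers above, written as runnable assertions.
import assert from 'node:assert';

assert.strictEqual(valueTypeFormat('3', 'number'), 3);
assert.strictEqual(valueTypeFormat({ a: 1 }, 'string'), '{"a":1}');
assert.strictEqual(valueTypeFormat(0, 'boolean'), false);
assert.deepStrictEqual(getHistories(2, []), []); // slice(-2) of [] is []
assert.deepStrictEqual(getHistories(undefined), []);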
@@ -21,18 +21,17 @@ export function connectToDatabase(): Promise<void> {
      initGlobal();
    },
    afterHook: async () => {
      // init system config
      getInitConfig();
      // init vector database, init root user
      await Promise.all([initVectorStore(), initRootUser()]);

      startMongoWatch();
      // cron
      startCron();
      // init system config
      getInitConfig();

      // init vector database
      await initVectorStore();
      // start queue
      startTrainingQueue(true);

      initRootUser();
    }
  });
}