* model config

* feat: model config ui

* perf: rename variable

* feat: custom request url

* perf: model buffer

* perf: init model

* feat: json model config

* auto login

* fix: ts

* update packages

* package

* fix: dockerfile
Archer committed 2025-01-22 22:59:28 +08:00 (committed by GitHub)
parent 16629e32a7
commit e009be51e7
93 changed files with 2361 additions and 564 deletions

View File

@@ -6,8 +6,7 @@ import { FastGPTProUrl } from '../constants';
export const getFastGPTConfigFromDB = async () => {
if (!FastGPTProUrl) {
return {
config: {} as FastGPTConfigFileType,
configId: undefined
config: {} as FastGPTConfigFileType
};
}
@@ -18,9 +17,25 @@ export const getFastGPTConfigFromDB = async () => {
});
const config = res?.value || {};
// Use the config's createTime (which doubles as its update time) as a cache id: if the frontend hits the cache, the config file does not need to be returned again
global.systemInitBufferId = res ? res.createTime.getTime().toString() : undefined;
return {
configId: res ? String(res._id) : undefined,
config: config as FastGPTConfigFileType
};
};
export const updateFastGPTConfigBuffer = async () => {
const res = await MongoSystemConfigs.findOne({
type: SystemConfigsTypeEnum.fastgpt
}).sort({
createTime: -1
});
if (!res) return;
res.createTime = new Date();
await res.save();
global.systemInitBufferId = res.createTime.getTime().toString();
};
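The comment above describes the caching scheme: the config document's createTime doubles as a buffer id that clients can echo back. A minimal sketch of how an init endpoint might consume it; only global.systemInitBufferId and getFastGPTConfigFromDB come from this commit, the handler name and response shape are assumptions:

// Hypothetical init handler (a sketch, not part of this commit).
export async function handleSystemInit(clientBufferId?: string) {
  // Client already holds the latest config: return only the buffer id.
  if (clientBufferId && clientBufferId === global.systemInitBufferId) {
    return { bufferId: global.systemInitBufferId, cached: true };
  }
  // Otherwise return the full config together with the current buffer id.
  const { config } = await getFastGPTConfigFromDB();
  return { bufferId: global.systemInitBufferId, cached: false, config };
}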

View File

@@ -13,15 +13,6 @@ export const initFastGPTConfig = (config?: FastGPTConfigFileType) => {
global.feConfigs = config.feConfigs;
global.systemEnv = config.systemEnv;
global.subPlans = config.subPlans;
global.llmModels = config.llmModels;
global.llmModelPriceType = global.llmModels.some((item) => typeof item.inputPrice === 'number')
? 'IO'
: 'Tokens';
global.vectorModels = config.vectorModels;
global.audioSpeechModels = config.audioSpeechModels;
global.whisperModel = config.whisperModel;
global.reRankModels = config.reRankModels;
};
export const systemStartCb = () => {

View File

@@ -2,7 +2,7 @@
import { PgVectorCtrl } from './pg/class';
import { getVectorsByText } from '../../core/ai/embedding';
import { InsertVectorProps } from './controller.d';
import { VectorModelItemType } from '@fastgpt/global/core/ai/model.d';
import { EmbeddingModelItemType } from '@fastgpt/global/core/ai/model.d';
import { MILVUS_ADDRESS, PG_ADDRESS } from './constants';
import { MilvusCtrl } from './milvus/class';
@@ -28,7 +28,7 @@ export const insertDatasetDataVector = async ({
...props
}: InsertVectorProps & {
query: string;
model: VectorModelItemType;
model: EmbeddingModelItemType;
}) => {
const { vectors, tokens } = await getVectorsByText({
model,

View File

@@ -2,6 +2,7 @@ import fs from 'fs';
import { getAxiosConfig } from '../config';
import axios from 'axios';
import FormData from 'form-data';
import { getSTTModel } from '../model';
export const aiTranscriptions = async ({
model,
@@ -14,13 +15,21 @@ export const aiTranscriptions = async ({
data.append('model', model);
data.append('file', fileStream);
const modelData = getSTTModel(model);
const aiAxiosConfig = getAxiosConfig();
const { data: result } = await axios<{ text: string }>({
method: 'post',
baseURL: aiAxiosConfig.baseUrl,
url: '/audio/transcriptions',
...(modelData.requestUrl
? { url: modelData.requestUrl }
: {
baseURL: aiAxiosConfig.baseUrl,
url: modelData.requestUrl || '/audio/transcriptions'
}),
headers: {
Authorization: aiAxiosConfig.authorization,
Authorization: modelData.requestAuth
? `Bearer ${modelData.requestAuth}`
: aiAxiosConfig.authorization,
...data.getHeaders()
},
data: data

View File

@@ -7,6 +7,7 @@ import { getErrText } from '@fastgpt/global/common/error/utils';
import { addLog } from '../../common/system/log';
import { i18nT } from '../../../web/i18n/utils';
import { OpenaiAccountType } from '@fastgpt/global/support/user/team/type';
import { getLLMModel } from './model';
export const openaiBaseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1';
@@ -63,12 +64,23 @@ export const createChatCompletion = async <T extends CompletionsBodyType>({
getEmptyResponseTip: () => string;
}> => {
try {
const modelConstantsData = getLLMModel(body.model);
const formatTimeout = timeout ? timeout : body.stream ? 60000 : 600000;
const ai = getAIApi({
userKey,
timeout: formatTimeout
});
const response = await ai.chat.completions.create(body, options);
const response = await ai.chat.completions.create(body, {
...options,
...(modelConstantsData.requestUrl ? { path: modelConstantsData.requestUrl } : {}),
headers: {
...options?.headers,
...(modelConstantsData.requestAuth
? { Authorization: `Bearer ${modelConstantsData.requestAuth}` }
: {})
}
});
const isStreamResponse =
typeof response === 'object' &&
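For illustration, a model entry that would trigger the per-model overrides read above could look like the following. requestUrl and requestAuth are the fields this commit reads; the concrete values are placeholders:

// Hypothetical metadata for a model served behind a custom endpoint.
const customModel = {
  model: 'my-proxied-gpt-4o-mini',                       // placeholder name
  requestUrl: 'https://example.com/v1/chat/completions', // used as the request `path`
  requestAuth: 'sk-placeholder'                          // sent as `Authorization: Bearer ...`
};
// With these set, createChatCompletion bypasses the default baseURL and
// authorization that getAIApi() would otherwise apply.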

View File

@@ -0,0 +1,10 @@
{
"provider": "OpenAI",
"model": "text-embedding-3-small",
"name": "text-embedding-3-small",
"defaultToken": 512,
"maxToken": 3000,
"charsPointsPrice": 0
}

View File

@@ -3,9 +3,8 @@
"model": "text-embedding-ada-002",
"name": "text-embedding-ada-002",
"defaultToken": 512, // 默认分块 token
"maxToken": 3000, // 最大分块 token
"weight": 0, // 权重
"defaultToken": 512,
"maxToken": 3000,
"charsPointsPrice": 0 // 积分/1k token
"charsPointsPrice": 0
}

View File

@@ -1,33 +1,28 @@
{
"provider": "OpenAI",
"model": "gpt-4o-mini",
"name": "GPT-4o-mini", // alias
"name": "GPT-4o-mini",
"maxContext": 125000, // 最大上下文
"maxResponse": 16000, // 最大回复
"quoteMaxToken": 60000, // 最大引用
"maxTemperature": 1.2, // 最大温度
"presencePenaltyRange": [-2, 2], // 惩罚系数范围
"frequencyPenaltyRange": [-2, 2], // 频率惩罚系数范围
"responseFormatList": ["text", "json_object", "json_schema"], // 响应格式
"showStopSign": true, // 是否显示停止符号
"censor": false,
"charsPointsPrice": 0,
"vision": true, // 是否支持图片识别
"toolChoice": true, // 是否支持工具调用
"functionCall": false, // 是否支持函数调用(一般都可以 false 了,基本不用了)
"defaultSystemChatPrompt": "", // 默认系统提示
"maxContext": 125000,
"maxResponse": 16000,
"quoteMaxToken": 60000,
"maxTemperature": 1.2,
"datasetProcess": true, // 用于知识库文本处理
"usedInClassify": true, // 用于问题分类
"customCQPrompt": "", // 自定义问题分类提示
"usedInExtractFields": true, // 用于提取字段
"customExtractPrompt": "", // 自定义提取提示
"usedInToolCall": true, // 用于工具调用
"usedInQueryExtension": true, // 用于问题优化
"vision": true,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"defaultConfig": {}, // 额外的自定义 body
"fieldMap": {}, // body 字段映射
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"censor": false, // 是否开启敏感词过滤
"charsPointsPrice": 0 // n 积分/1k token
"defaultConfig": {},
"fieldMap": {}
}

View File

@@ -0,0 +1,21 @@
import { connectionMongo, getMongoModel } from '../../../common/mongo';
const { Schema } = connectionMongo;
import type { SystemModelSchemaType } from '../type';
const SystemModelSchema = new Schema({
model: {
type: String,
required: true,
unique: true
},
metadata: {
type: Object,
required: true,
default: {}
}
});
export const MongoSystemModel = getMongoModel<SystemModelSchemaType>(
'system_models',
SystemModelSchema
);

View File

@@ -0,0 +1,118 @@
import path from 'path';
import * as fs from 'fs';
import { SystemModelItemType } from '../type';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
import { MongoSystemModel } from './schema';
import {
LLMModelItemType,
EmbeddingModelItemType,
TTSModelType,
STTModelType,
ReRankModelItemType
} from '@fastgpt/global/core/ai/model.d';
import { debounce } from 'lodash';
type FolderBaseType = `${ModelTypeEnum}`;
export const loadSystemModels = async (init = false) => {
const getModelNameList = (base: FolderBaseType) => {
const currentFileUrl = new URL(import.meta.url);
const modelsPath = path.join(path.dirname(currentFileUrl.pathname), base);
return fs.readdirSync(modelsPath) as string[];
};
const pushModel = (model: SystemModelItemType) => {
global.systemModelList.push(model);
if (model.isActive) {
global.systemActiveModelList.push(model);
if (model.type === ModelTypeEnum.llm) {
global.llmModelMap.set(model.model, model);
global.llmModelMap.set(model.name, model);
} else if (model.type === ModelTypeEnum.embedding) {
global.embeddingModelMap.set(model.model, model);
global.embeddingModelMap.set(model.name, model);
} else if (model.type === ModelTypeEnum.tts) {
global.ttsModelMap.set(model.model, model);
global.ttsModelMap.set(model.name, model);
} else if (model.type === ModelTypeEnum.stt) {
global.sttModelMap.set(model.model, model);
global.sttModelMap.set(model.name, model);
} else if (model.type === ModelTypeEnum.rerank) {
global.reRankModelMap.set(model.model, model);
global.reRankModelMap.set(model.name, model);
}
}
};
if (!init && global.systemModelList && global.systemModelList.length > 0) return;
const dbModels = await MongoSystemModel.find({}).lean();
global.systemModelList = [];
global.systemActiveModelList = [];
global.llmModelMap = new Map<string, LLMModelItemType>();
global.embeddingModelMap = new Map<string, EmbeddingModelItemType>();
global.ttsModelMap = new Map<string, TTSModelType>();
global.sttModelMap = new Map<string, STTModelType>();
global.reRankModelMap = new Map<string, ReRankModelItemType>();
const baseList: FolderBaseType[] = [
ModelTypeEnum.llm,
ModelTypeEnum.embedding,
ModelTypeEnum.tts,
ModelTypeEnum.stt,
ModelTypeEnum.rerank
];
// System model
await Promise.all(
baseList.map(async (base) => {
const modelList = getModelNameList(base);
const nameList = modelList.map((name) => `${base}/${name}`);
await Promise.all(
nameList.map(async (name) => {
const fileContent = (await import(`./${name}`))?.default as SystemModelItemType;
const dbModel = dbModels.find((item) => item.model === fileContent.model);
const model: any = {
...fileContent,
...dbModel?.metadata,
type: dbModel?.metadata?.type || base,
isCustom: false
};
pushModel(model);
})
);
})
);
// Custom model
dbModels.forEach((dbModel) => {
if (global.systemModelList.find((item) => item.model === dbModel.model)) return;
pushModel({
...dbModel.metadata,
isCustom: true
});
});
console.log('Load models success', JSON.stringify(global.systemActiveModelList, null, 2));
};
export const watchSystemModelUpdate = () => {
const changeStream = MongoSystemModel.watch();
changeStream.on(
'change',
debounce(async () => {
try {
await loadSystemModels(true);
} catch (error) {}
}, 500)
);
};
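loadSystemModels resolves JSON definitions from folders named after ModelTypeEnum and merges them with metadata stored in MongoDB. A sketch of the expected layout and lookup; the exact file locations are inferred from baseList and the dynamic import rather than listed in this diff, and findModelFromAlldata is defined in the model helpers later in this commit:

// Inferred folder layout next to this file (one JSON definition per model):
//   llm/gpt-4o-mini.json
//   embedding/text-embedding-3-small.json
//   tts/..., stt/..., rerank/...
// A system_models document with the same `model` value overrides the file's fields.
await loadSystemModels();                          // merge file defaults with DB metadata
const model = findModelFromAlldata('gpt-4o-mini'); // lookup via global.systemModelList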

View File

@@ -1,11 +1,11 @@
import { VectorModelItemType } from '@fastgpt/global/core/ai/model.d';
import { EmbeddingModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getAIApi } from '../config';
import { countPromptTokens } from '../../../common/string/tiktoken/index';
import { EmbeddingTypeEnm } from '@fastgpt/global/core/ai/constants';
import { addLog } from '../../../common/system/log';
type GetVectorProps = {
model: VectorModelItemType;
model: EmbeddingModelItemType;
input: string;
type?: `${EmbeddingTypeEnm}`;
};
@@ -24,13 +24,23 @@ export async function getVectorsByText({ model, input, type }: GetVectorProps) {
// input text to vector
const result = await ai.embeddings
.create({
...model.defaultConfig,
...(type === EmbeddingTypeEnm.db && model.dbConfig),
...(type === EmbeddingTypeEnm.query && model.queryConfig),
model: model.model,
input: [input]
})
.create(
{
...model.defaultConfig,
...(type === EmbeddingTypeEnm.db && model.dbConfig),
...(type === EmbeddingTypeEnm.query && model.queryConfig),
model: model.model,
input: [input]
},
model.requestUrl && model.requestAuth
? {
path: model.requestUrl,
headers: {
Authorization: `Bearer ${model.requestAuth}`
}
}
: {}
)
.then(async (res) => {
if (!res.data) {
addLog.error('Embedding API is not responding', res);

View File

@@ -1,51 +1,54 @@
import { SystemModelItemType } from './type';
export const getFirstLLMModel = () => {
return Array.from(global.llmModelMap.values())[0];
};
export const getLLMModel = (model?: string) => {
return (
global.llmModels.find((item) => item.model === model || item.name === model) ??
global.llmModels[0]
);
if (!model) return getFirstLLMModel();
return global.llmModelMap.get(model) || getFirstLLMModel();
};
export const getDatasetModel = (model?: string) => {
return (
global.llmModels
Array.from(global.llmModelMap.values())
?.filter((item) => item.datasetProcess)
?.find((item) => item.model === model || item.name === model) ?? global.llmModels[0]
?.find((item) => item.model === model || item.name === model) ?? getFirstLLMModel()
);
};
export const getVectorModel = (model?: string) => {
return (
global.vectorModels.find((item) => item.model === model || item.name === model) ||
global.vectorModels[0]
);
export const getFirstEmbeddingModel = () => Array.from(global.embeddingModelMap.values())[0];
export const getEmbeddingModel = (model?: string) => {
if (!model) return getFirstEmbeddingModel();
return global.embeddingModelMap.get(model) || getFirstEmbeddingModel();
};
export function getAudioSpeechModel(model?: string) {
return (
global.audioSpeechModels.find((item) => item.model === model || item.name === model) ||
global.audioSpeechModels[0]
);
export const getFirstTTSModel = () => Array.from(global.ttsModelMap.values())[0];
export function getTTSModel(model?: string) {
if (!model) return getFirstTTSModel();
return global.ttsModelMap.get(model) || getFirstTTSModel();
}
export function getWhisperModel(model?: string) {
return global.whisperModel;
export const getFirstSTTModel = () => Array.from(global.sttModelMap.values())[0];
export function getSTTModel(model?: string) {
if (!model) return getFirstSTTModel();
return global.sttModelMap.get(model) || getFirstSTTModel();
}
export const getFirstReRankModel = () => Array.from(global.reRankModelMap.values())[0];
export function getReRankModel(model?: string) {
return global.reRankModels.find((item) => item.model === model);
if (!model) return getFirstReRankModel();
return global.reRankModelMap.get(model) || getFirstReRankModel();
}
export enum ModelTypeEnum {
llm = 'llm',
vector = 'vector',
audioSpeech = 'audioSpeech',
whisper = 'whisper',
rerank = 'rerank'
}
export const getModelMap = {
[ModelTypeEnum.llm]: getLLMModel,
[ModelTypeEnum.vector]: getVectorModel,
[ModelTypeEnum.audioSpeech]: getAudioSpeechModel,
[ModelTypeEnum.whisper]: getWhisperModel,
[ModelTypeEnum.rerank]: getReRankModel
export const findAIModel = (model: string): SystemModelItemType | undefined => {
return (
global.llmModelMap.get(model) ||
global.embeddingModelMap.get(model) ||
global.ttsModelMap.get(model) ||
global.sttModelMap.get(model) ||
global.reRankModelMap.get(model)
);
};
export const findModelFromAlldata = (model: string) => {
return global.systemModelList.find((item) => item.model === model);
};
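Usage is uniform across the new getters: each falls back to the first registered model of its type when the name is missing or unknown. A short sketch, with placeholder model names:

const llm = getLLMModel('gpt-4o-mini');          // lookup by model id or display name
const embedding = getEmbeddingModel();           // no argument -> first embedding model
const found = findAIModel('some-model');         // searches every type map, may be undefined
const meta = findModelFromAlldata('some-model'); // full entry from global.systemModelList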

View File

@@ -1,5 +1,6 @@
import { addLog } from '../../../common/system/log';
import { POST } from '../../../common/api/serverRequest';
import { getFirstReRankModel } from '../model';
type PostReRankResponse = {
id: string;
@@ -17,7 +18,7 @@ export function reRankRecall({
query: string;
documents: { id: string; text: string }[];
}): Promise<ReRankCallResult> {
const model = global.reRankModels[0];
const model = getFirstReRankModel();
if (!model || !model?.requestUrl) {
return Promise.reject('no rerank model');

packages/service/core/ai/type.d.ts (new file)
View File

@@ -0,0 +1,33 @@
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
import {
STTModelType,
ReRankModelItemType,
TTSModelType,
EmbeddingModelItemType,
LLMModelItemType
} from '@fastgpt/global/core/ai/model.d';
export type SystemModelSchemaType = {
_id: string;
model: string;
metadata: SystemModelItemType;
};
export type SystemModelItemType =
| LLMModelItemType
| EmbeddingModelItemType
| TTSModelType
| STTModelType
| ReRankModelItemType;
declare global {
var systemModelList: SystemModelItemType[];
// var systemModelMap: Map<string, SystemModelItemType>;
var llmModelMap: Map<string, LLMModelItemType>;
var embeddingModelMap: Map<string, EmbeddingModelItemType>;
var ttsModelMap: Map<string, TTSModelType>;
var sttModelMap: Map<string, STTModelType>;
var reRankModelMap: Map<string, ReRankModelItemType>;
var systemActiveModelList: SystemModelItemType[];
}

View File

@@ -19,7 +19,7 @@ import { predictDataLimitLength } from '../../../../global/core/dataset/utils';
import { mongoSessionRun } from '../../../common/mongo/sessionRun';
import { createTrainingUsage } from '../../../support/wallet/usage/controller';
import { UsageSourceEnum } from '@fastgpt/global/support/wallet/usage/constants';
import { getLLMModel, getVectorModel } from '../../ai/model';
import { getLLMModel, getEmbeddingModel } from '../../ai/model';
import { pushDataListToTrainingQueue } from '../training/controller';
import { MongoImage } from '../../../common/file/image/schema';
import { hashStr } from '@fastgpt/global/common/string/tools';
@@ -93,7 +93,7 @@ export const createCollectionAndInsertData = async ({
tmbId,
appName: usageName,
billSource: UsageSourceEnum.training,
vectorModel: getVectorModel(dataset.vectorModel)?.name,
vectorModel: getEmbeddingModel(dataset.vectorModel)?.name,
agentModel: getLLMModel(dataset.agentModel)?.name,
session
});

View File

@@ -5,7 +5,7 @@ import {
} from '@fastgpt/global/core/dataset/constants';
import { recallFromVectorStore } from '../../../common/vectorStore/controller';
import { getVectorsByText } from '../../ai/embedding';
import { getVectorModel } from '../../ai/model';
import { getEmbeddingModel, getFirstReRankModel } from '../../ai/model';
import { MongoDatasetData } from '../data/schema';
import {
DatasetDataSchemaType,
@@ -67,7 +67,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
/* init params */
searchMode = DatasetSearchModeMap[searchMode] ? searchMode : DatasetSearchModeEnum.embedding;
usingReRank = usingReRank && global.reRankModels.length > 0;
usingReRank = usingReRank && !!getFirstReRankModel();
// Compatible with topk limit
let set = new Set<string>();
@@ -253,7 +253,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
filterCollectionIdList?: string[];
}) => {
const { vectors, tokens } = await getVectorsByText({
model: getVectorModel(model),
model: getEmbeddingModel(model),
input: query,
type: 'query'
});

View File

@@ -7,7 +7,7 @@ import type {
import { TrainingModeEnum } from '@fastgpt/global/core/dataset/constants';
import { simpleText } from '@fastgpt/global/common/string/tools';
import { ClientSession } from '../../../common/mongo';
import { getLLMModel, getVectorModel } from '../../ai/model';
import { getLLMModel, getEmbeddingModel } from '../../ai/model';
import { addLog } from '../../../common/system/log';
import { getCollectionWithDataset } from '../controller';
import { mongoSessionRun } from '../../../common/mongo/sessionRun';
@@ -70,7 +70,7 @@ export async function pushDataListToTrainingQueue({
if (!agentModelData) {
return Promise.reject(`File model ${agentModel} is inValid`);
}
const vectorModelData = getVectorModel(vectorModel);
const vectorModelData = getEmbeddingModel(vectorModel);
if (!vectorModelData) {
return Promise.reject(`Vector model ${vectorModel} is inValid`);
}

View File

@@ -13,7 +13,7 @@ import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { Prompt_CQJson } from '@fastgpt/global/core/ai/prompt/agent';
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { ModelTypeEnum, getLLMModel } from '../../../ai/model';
import { getLLMModel } from '../../../ai/model';
import { getHistories } from '../utils';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
@@ -22,6 +22,7 @@ import { getHandleId } from '@fastgpt/global/core/workflow/utils';
import { loadRequestMessages } from '../../../chat/utils';
import { llmCompletionsBodyFormat } from '../../../ai/utils';
import { addLog } from '../../../../common/system/log';
import { ModelTypeEnum } from '../../../../../global/core/ai/model';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.aiModel]: string;

View File

@@ -16,7 +16,7 @@ import { Prompt_ExtractJson } from '@fastgpt/global/core/ai/prompt/agent';
import { replaceVariable, sliceJsonStr } from '@fastgpt/global/common/string/tools';
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getHistories } from '../utils';
import { ModelTypeEnum, getLLMModel } from '../../../ai/model';
import { getLLMModel } from '../../../ai/model';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import json5 from 'json5';
import {
@@ -28,6 +28,7 @@ import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/co
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import { llmCompletionsBodyFormat } from '../../../ai/utils';
import { ModelTypeEnum } from '../../../../../global/core/ai/model';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.history]?: ChatItemType[];

View File

@@ -4,7 +4,7 @@ import type {
DispatchNodeResultType,
RuntimeNodeItemType
} from '@fastgpt/global/core/workflow/runtime/type';
import { ModelTypeEnum, getLLMModel } from '../../../../ai/model';
import { getLLMModel } from '../../../../ai/model';
import { filterToolNodeIdByEdges, getHistories } from '../../utils';
import { runToolWithToolChoice } from './toolChoice';
import { DispatchToolModuleProps, ToolNodeItemType } from './type.d';
@@ -30,6 +30,7 @@ import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
import { Prompt_DocumentQuote } from '@fastgpt/global/core/ai/prompt/AIChat';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { postTextCensor } from '../../../../../common/api/requestPlusApi';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.answerText]: string;

View File

@@ -33,7 +33,7 @@ import type { AIChatNodeProps } from '@fastgpt/global/core/workflow/runtime/type
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { responseWriteController } from '../../../../common/response';
import { getLLMModel, ModelTypeEnum } from '../../../ai/model';
import { getLLMModel } from '../../../ai/model';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
@@ -47,6 +47,7 @@ import { AiChatQuoteRoleType } from '@fastgpt/global/core/workflow/template/syst
import { getFileContentFromLinks, getHistoryFileLinks } from '../tools/readFiles';
import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
import { i18nT } from '../../../../../web/i18n/utils';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
export type ChatProps = ModuleDispatchProps<
AIChatNodeProps & {

View File

@@ -6,7 +6,7 @@ import { formatModelChars2Points } from '../../../../support/wallet/usage/utils'
import type { SelectedDatasetType } from '@fastgpt/global/core/workflow/api.d';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { ModelTypeEnum, getLLMModel, getVectorModel } from '../../../ai/model';
import { getLLMModel, getEmbeddingModel } from '../../../ai/model';
import { searchDatasetData } from '../../../dataset/search/controller';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
@@ -18,6 +18,7 @@ import { checkTeamReRankPermission } from '../../../../support/permission/teamLi
import { MongoDataset } from '../../../dataset/schema';
import { i18nT } from '../../../../../web/i18n/utils';
import { filterDatasetsByTmbId } from '../../../dataset/utils';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
type DatasetSearchProps = ModuleDispatchProps<{
[NodeInputKeyEnum.datasetSelectList]: SelectedDatasetType;
@@ -110,7 +111,7 @@ export async function dispatchDatasetSearch(
// console.log(concatQueries, rewriteQuery, aiExtensionResult);
// get vector
const vectorModel = getVectorModel(
const vectorModel = getEmbeddingModel(
(await MongoDataset.findById(datasets[0].datasetId, 'vectorModel').lean())?.vectorModel
);
@@ -138,7 +139,7 @@ export async function dispatchDatasetSearch(
const { totalPoints, modelName } = formatModelChars2Points({
model: vectorModel.model,
inputTokens: tokens,
modelType: ModelTypeEnum.vector
modelType: ModelTypeEnum.embedding
});
const responseData: DispatchNodeResponseType & { totalPoints: number } = {
totalPoints,

View File

@@ -2,12 +2,13 @@ import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { ModelTypeEnum, getLLMModel } from '../../../../core/ai/model';
import { getLLMModel } from '../../../../core/ai/model';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import { queryExtension } from '../../../../core/ai/functions/queryExtension';
import { getHistories } from '../utils';
import { hashStr } from '@fastgpt/global/common/string/tools';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.aiModel]: string;

View File

@@ -5,6 +5,10 @@ import { TeamErrEnum } from '@fastgpt/global/common/error/code/team';
import { AuthModeType, AuthResponseType } from '../type';
import { NullPermission } from '@fastgpt/global/support/permission/constant';
import { TeamPermission } from '@fastgpt/global/support/permission/user/controller';
import { authCert } from '../auth/common';
import { MongoUser } from '../../user/schema';
import { ERROR_ENUM } from '@fastgpt/global/common/error/errorCode';
import { ApiRequestProps } from '../../../type/next';
/* auth user role */
export async function authUserPer(props: AuthModeType): Promise<
@@ -34,3 +38,19 @@ export async function authUserPer(props: AuthModeType): Promise<
tmb
};
}
export const authSystemAdmin = async ({ req }: { req: ApiRequestProps }) => {
try {
const result = await authCert({ req, authToken: true });
const user = await MongoUser.findOne({
_id: result.userId
});
if (user && user.username !== 'root') {
return Promise.reject(ERROR_ENUM.unAuthorization);
}
return result;
} catch (error) {
throw error;
}
};

View File

@@ -1,5 +1,5 @@
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { ModelTypeEnum, getModelMap } from '../../../core/ai/model';
import { findAIModel } from '../../../core/ai/model';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
export const formatModelChars2Points = ({
model,
@@ -14,7 +14,7 @@ export const formatModelChars2Points = ({
modelType: `${ModelTypeEnum}`;
multiple?: number;
}) => {
const modelData = getModelMap?.[modelType]?.(model) as LLMModelItemType;
const modelData = findAIModel(model);
if (!modelData) {
return {
totalPoints: 0,

View File

@@ -1,9 +1,9 @@
import { FastGPTFeConfigsType, SystemEnvType } from '@fastgpt/global/common/system/types';
import {
AudioSpeechModelType,
TTSModelType,
ReRankModelItemType,
STTModelType,
VectorModelItemType,
EmbeddingModelItemType,
LLMModelItemType
} from '@fastgpt/global/core/ai/model.d';
import { SubPlanType } from '@fastgpt/global/support/wallet/sub/type';
@@ -12,17 +12,11 @@ import { Worker } from 'worker_threads';
declare global {
var systemInitBufferId: string | undefined;
var systemVersion: string;
var feConfigs: FastGPTFeConfigsType;
var systemEnv: SystemEnvType;
var subPlans: SubPlanType | undefined;
var llmModels: LLMModelItemType[];
var llmModelPriceType: 'IO' | 'Tokens';
var vectorModels: VectorModelItemType[];
var audioSpeechModels: AudioSpeechModelType[];
var whisperModel: STTModelType;
var reRankModels: ReRankModelItemType[];
var workerPoll: Record<WorkerNameEnum, WorkerPool>;
}