V4.8.20 feature (#3686)

* Aiproxy (#3649)

* model config

* feat: model config ui

* perf: rename variable

* feat: custom request url

* perf: model buffer

* perf: init model

* feat: json model config

* auto login

* fix: ts

* update packages

* package

* fix: dockerfile

* feat: usage filter & export & dashboard (#3538)

* feat: usage filter & export & dashboard

* adjust ui

* fix tmb scroll

* fix code & select all

* merge

* perf: usages list; perf: move components (#3654)

* perf: usages list

* team sub plan load

* perf: usage dashboard code

* perf: dashboard ui

* perf: move components

* add default model config (#3653)

* 4.8.20 test (#3656)

* provider

* perf: model config

* model perf (#3657)

* fix: model

* dataset quote

* perf: model config

* model tag

* doubao model config

* perf: config model

* feat: model test

* fix: POST 500 error on dingtalk bot (#3655)

* feat: default model (#3662)

* move model config

* feat: default model

* fix: falsely triggered org selection (#3661)

* export usage csv i18n (#3660)

* export usage csv i18n

* fix build

* feat: markdown extension (#3663)

* feat: markdown extension

* media CORS

* rerank test

* default price

* perf: default model

* fix: cannot customize provider

* fix: default model select

* update bg

* perf: default model selector

* fix: usage export

* i18n

* fix: rerank

* update init extension

* perf: ip limit check

* doubao model order

* web default model

* perf: tts selector

* perf: tts error

* qrcode package

* reload buffer (#3665)

* reload buffer

* reload buffer

* tts selector

* fix: err tip (#3666)

* fix: err tip

* perf: training queue

* doc

* fix interactive edge (#3659)

* fix interactive edge

* fix

* comment

* add gemini model

* fix: chat model select

* perf: supplement assistant empty response (#3669)

* perf: supplement assistant empty response

* check array

* perf: max_token count; feat: support reasoner output; fix: member scroll (#3681)

* perf: supplement assistant empty response

* check array

* perf: max_token count

* feat: support reasoner output

* member scroll

* update provider order

* i18n

* fix: stream response (#3682)

* perf: supplement assistant empty response

* check array

* fix: stream response

* fix: model config cannot be set to null

* fix: reasoning response (#3684)

* perf: supplement assistant empty response

* check array

* fix: reasoning response

* fix: reasoning response

* doc (#3685)

* perf: supplement assistant empty response

* check array

* doc

* lock

* animation

* update doc

* update compose

* doc

* doc

---------

Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
Author: Archer
Date: 2025-02-05 00:10:47 +08:00
Committed by: GitHub
Parent: c393002f1d
Commit: db2c0a0bdb
496 changed files with 9031 additions and 4726 deletions

View File

@@ -0,0 +1,77 @@
import { readConfigData } from '@/service/common/system';
import { NextAPI } from '@/service/middleware/entry';
import {
getFastGPTConfigFromDB,
updateFastGPTConfigBuffer
} from '@fastgpt/service/common/system/config/controller';
import { authCert } from '@fastgpt/service/support/permission/auth/common';
import { NextApiRequest, NextApiResponse } from 'next';
import json5 from 'json5';
import { FastGPTConfigFileType } from '@fastgpt/global/common/system/types';
import { MongoSystemModel } from '@fastgpt/service/core/ai/config/schema';
import { loadSystemModels } from '@fastgpt/service/core/ai/config/utils';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
/*
  Model config migration: load the existing model config from the database
  (falling back to config.json), convert every LLM / embedding / rerank / TTS /
  STT entry into a MongoSystemModel document, then reload the system models and
  refresh the config buffer.
*/
async function handler(req: NextApiRequest, res: NextApiResponse) {
await authCert({ req, authRoot: true });
// load config
const [{ config: dbConfig }, fileConfig] = await Promise.all([
getFastGPTConfigFromDB(),
readConfigData('config.json')
]);
const fileRes = json5.parse(fileConfig) as FastGPTConfigFileType;
const llmModels = dbConfig.llmModels || fileRes.llmModels || [];
const vectorModels = dbConfig.vectorModels || fileRes.vectorModels || [];
const reRankModels = dbConfig.reRankModels || fileRes.reRankModels || [];
const audioSpeechModels = dbConfig.audioSpeechModels || fileRes.audioSpeechModels || [];
const whisperModel = dbConfig.whisperModel || fileRes.whisperModel;
const list = [
...llmModels.map((item) => ({
...item,
type: ModelTypeEnum.llm
})),
...vectorModels.map((item) => ({
...item,
type: ModelTypeEnum.embedding
})),
...reRankModels.map((item) => ({
...item,
type: ModelTypeEnum.rerank
})),
...audioSpeechModels.map((item) => ({
...item,
type: ModelTypeEnum.tts
})),
{
...whisperModel,
type: ModelTypeEnum.stt
}
];
for await (const item of list) {
try {
await MongoSystemModel.updateOne(
{ model: item.model },
{ $set: { model: item.model, metadata: { ...item, isActive: true } } },
{ upsert: true }
);
} catch (error) {
console.log(error);
}
}
await loadSystemModels(true);
await updateFastGPTConfigBuffer();
return { success: true };
}
export default NextAPI(handler);
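
The script parses config.json with json5 instead of JSON.parse. A minimal, self-contained sketch of why that matters (the sample object below is made up): JSON5 tolerates comments and trailing commas, which a hand-edited config file often contains.

import json5 from 'json5';

// JSON5 accepts syntax that strict JSON.parse would reject.
const raw = `{
  // comments are allowed in JSON5
  llmModels: [],
  vectorModels: [], // so are trailing commas
}`;

console.log(json5.parse(raw).llmModels); // []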

View File

@@ -1,10 +1,29 @@
import type { NextApiResponse } from 'next';
import { ApiRequestProps } from '@fastgpt/service/type/next';
import { NextAPI } from '@/service/middleware/entry';
import { InitDateResponse } from '@/global/common/api/systemRes';
import { SystemModelItemType } from '@fastgpt/service/core/ai/type';
async function handler(req: ApiRequestProps<{}, { bufferId?: string }>, res: NextApiResponse) {
async function handler(
req: ApiRequestProps<{}, { bufferId?: string }>,
res: NextApiResponse
): Promise<InitDateResponse> {
const { bufferId } = req.query;
const activeModelList = global.systemActiveModelList.map((model) => ({
...model,
customCQPrompt: undefined,
customExtractPrompt: undefined,
defaultSystemChatPrompt: undefined,
fieldMap: undefined,
defaultConfig: undefined,
weight: undefined,
dbConfig: undefined,
queryConfig: undefined,
requestUrl: undefined,
requestAuth: undefined
})) as SystemModelItemType[];
// If bufferId is the same as the current bufferId, return directly
if (bufferId && global.systemInitBufferId && global.systemInitBufferId === bufferId) {
return {
@@ -17,22 +36,9 @@ async function handler(req: ApiRequestProps<{}, { bufferId?: string }>, res: Nex
bufferId: global.systemInitBufferId,
feConfigs: global.feConfigs,
subPlans: global.subPlans,
llmModels: global.llmModels.map((model) => ({
...model,
customCQPrompt: '',
customExtractPrompt: '',
defaultSystemChatPrompt: ''
})),
vectorModels: global.vectorModels,
reRankModels:
global.reRankModels?.map((item) => ({
...item,
requestUrl: '',
requestAuth: ''
})) || [],
whisperModel: global.whisperModel,
audioSpeechModels: global.audioSpeechModels,
systemVersion: global.systemVersion || '0.0.0'
systemVersion: global.systemVersion || '0.0.0',
activeModelList,
defaultModels: global.systemDefaultModel
};
}
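
The explicit undefined assignments above act as a scrub before the model list is sent to the browser. A small illustration (hypothetical object, not the real model shape): fields set to undefined disappear when the response is serialized to JSON, so request credentials and prompt templates never leave the server.

// Sketch only: undefined-valued keys are dropped by JSON serialization.
const model = { model: 'demo-model', requestUrl: 'https://internal.example/api', requestAuth: 'secret' };
const scrubbed = { ...model, requestUrl: undefined, requestAuth: undefined };

console.log(JSON.stringify(scrubbed)); // {"model":"demo-model"}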

View File

@@ -4,7 +4,7 @@ import { addLog } from '@fastgpt/service/common/system/log';
import { TrackEnum } from '@fastgpt/global/common/middle/tracks/constants';
import { TrackModel } from '@fastgpt/service/common/middle/tracks/schema';
import { authCert } from '@fastgpt/service/support/permission/auth/common';
import { useReqFrequencyLimit } from '@fastgpt/service/common/middle/reqFrequencyLimit';
import { useIPFrequencyLimit } from '@fastgpt/service/common/middle/reqFrequencyLimit';
export type pushQuery = {};
@@ -38,7 +38,7 @@ async function handler(
return TrackModel.create(data);
}
export default NextAPI(useReqFrequencyLimit(1, 5), handler);
export default NextAPI(useIPFrequencyLimit({ id: 'push-tracks', seconds: 1, limit: 5 }), handler);
export const config = {
api: {
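
This route, together with the search-test, transcription, and login routes further down, moves from the positional useReqFrequencyLimit(seconds, limit) helper to the named useIPFrequencyLimit({ id, seconds, limit }) options. As a rough, hypothetical sketch (not the real middleware, which lives in @fastgpt/service/common/middle/reqFrequencyLimit), an IP-keyed limiter with that option shape could look like:

type LimitOptions = { id: string; seconds: number; limit: number; force?: boolean };

// One counter per (limiter id, client IP), reset after the window expires.
const hits = new Map<string, { count: number; resetAt: number }>();

function allowRequest(ip: string, { id, seconds, limit }: LimitOptions): boolean {
  const key = `${id}:${ip}`;
  const now = Date.now();
  const entry = hits.get(key);
  if (!entry || entry.resetAt < now) {
    hits.set(key, { count: 1, resetAt: now + seconds * 1000 });
    return true;
  }
  if (entry.count >= limit) return false; // over the limit inside the window
  entry.count += 1;
  return true;
}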

View File

@@ -16,6 +16,7 @@ import { MongoTeamMember } from '@fastgpt/service/support/user/team/teamMemberSc
import { TeamMemberRoleEnum } from '@fastgpt/global/support/user/team/constant';
import { ChatErrEnum } from '@fastgpt/global/common/error/code/chat';
import { authCert } from '@fastgpt/service/support/permission/auth/common';
import { getDefaultLLMModel } from '@fastgpt/service/core/ai/model';
async function handler(
req: ApiRequestProps<
@@ -35,7 +36,7 @@ async function handler(
authApiKey: true
});
const qgModel = global.llmModels[0];
const qgModel = getDefaultLLMModel();
const { result, inputTokens, outputTokens } = await createQuestionGuide({
messages,
@@ -47,6 +48,7 @@ async function handler(
});
pushQuestionGuideUsage({
model: qgModel.model,
inputTokens,
outputTokens,
teamId,

View File

@@ -9,6 +9,7 @@ import { OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat';
import { getChatItems } from '@fastgpt/service/core/chat/controller';
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { getAppLatestVersion } from '@fastgpt/service/core/app/version/controller';
import { getDefaultLLMModel } from '@fastgpt/service/core/ai/model';
export type CreateQuestionGuideParams = OutLinkChatAuthProps & {
appId: string;
@@ -50,7 +51,7 @@ async function handler(req: ApiRequestProps<CreateQuestionGuideParams>, res: Nex
});
const messages = chats2GPTMessages({ messages: histories, reserveId: false });
const qgModel = questionGuide?.model || global.llmModels[0].model;
const qgModel = questionGuide?.model || getDefaultLLMModel().model;
const { result, inputTokens, outputTokens } = await createQuestionGuide({
messages,
@@ -59,6 +60,7 @@ async function handler(req: ApiRequestProps<CreateQuestionGuideParams>, res: Nex
});
pushQuestionGuideUsage({
model: qgModel,
inputTokens,
outputTokens,
teamId,

View File

@@ -0,0 +1,42 @@
import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next';
import { NextAPI } from '@/service/middleware/entry';
import { MongoSystemModel } from '@fastgpt/service/core/ai/config/schema';
import { authSystemAdmin } from '@fastgpt/service/support/permission/user/auth';
import { findModelFromAlldata } from '@fastgpt/service/core/ai/model';
import { updateFastGPTConfigBuffer } from '@fastgpt/service/common/system/config/controller';
import { loadSystemModels, updatedReloadSystemModel } from '@fastgpt/service/core/ai/config/utils';
export type deleteQuery = {
model: string;
};
export type deleteBody = {};
export type deleteResponse = {};
async function handler(
req: ApiRequestProps<deleteBody, deleteQuery>,
res: ApiResponseType<any>
): Promise<deleteResponse> {
await authSystemAdmin({ req });
const { model } = req.query;
const modelData = findModelFromAlldata(model);
if (!modelData) {
return Promise.reject('Model not found');
}
if (!modelData.isCustom) {
return Promise.reject('System model cannot be deleted');
}
await MongoSystemModel.deleteOne({ model });
await updatedReloadSystemModel();
return {};
}
export default NextAPI(handler);

View File

@@ -0,0 +1,29 @@
import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next';
import { NextAPI } from '@/service/middleware/entry';
import { SystemModelItemType } from '@fastgpt/service/core/ai/type';
import { authSystemAdmin } from '@fastgpt/service/support/permission/user/auth';
import { findModelFromAlldata } from '@fastgpt/service/core/ai/model';
export type detailQuery = {
model: string;
};
export type detailBody = {};
export type detailResponse = SystemModelItemType;
async function handler(
req: ApiRequestProps<detailBody, detailQuery>,
res: ApiResponseType<any>
): Promise<detailResponse> {
await authSystemAdmin({ req });
const { model } = req.query;
const modelItem = findModelFromAlldata(model);
if (!modelItem) {
return Promise.reject('Model not found');
}
return modelItem;
}
export default NextAPI(handler);

View File

@@ -0,0 +1,29 @@
import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next';
import { NextAPI } from '@/service/middleware/entry';
import { authSystemAdmin } from '@fastgpt/service/support/permission/user/auth';
import { MongoSystemModel } from '@fastgpt/service/core/ai/config/schema';
export type getConfigJsonQuery = {};
export type getConfigJsonBody = {};
export type getConfigJsonResponse = {};
async function handler(
req: ApiRequestProps<getConfigJsonBody, getConfigJsonQuery>,
res: ApiResponseType<any>
): Promise<getConfigJsonResponse> {
await authSystemAdmin({ req });
const data = await MongoSystemModel.find({}).lean();
return JSON.stringify(
data.map((item) => ({
model: item.model,
metadata: item.metadata
})),
null,
2
);
}
export default NextAPI(handler);

View File

@@ -0,0 +1,22 @@
import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next';
import { NextAPI } from '@/service/middleware/entry';
import { authSystemAdmin } from '@fastgpt/service/support/permission/user/auth';
import { getSystemModelConfig } from '@fastgpt/service/core/ai/config/utils';
import { SystemModelItemType } from '@fastgpt/service/core/ai/type';
export type getDefaultQuery = { model: string };
export type getDefaultBody = {};
async function handler(
req: ApiRequestProps<getDefaultBody, getDefaultQuery>,
res: ApiResponseType<any>
): Promise<SystemModelItemType> {
await authSystemAdmin({ req });
const model = req.query.model;
return getSystemModelConfig(model);
}
export default NextAPI(handler);

View File

@@ -0,0 +1,57 @@
import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next';
import { NextAPI } from '@/service/middleware/entry';
import { ModelProviderIdType } from '@fastgpt/global/core/ai/provider';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
import { authSystemAdmin } from '@fastgpt/service/support/permission/user/auth';
export type listQuery = {};
export type listBody = {};
export type listResponse = {
type: `${ModelTypeEnum}`;
name: string;
avatar: string | undefined;
provider: ModelProviderIdType;
model: string;
charsPointsPrice?: number;
inputPrice?: number;
outputPrice?: number;
isActive: boolean;
isCustom: boolean;
// Tag
contextToken?: number;
vision?: boolean;
toolChoice?: boolean;
}[];
async function handler(
req: ApiRequestProps<listBody, listQuery>,
res: ApiResponseType<any>
): Promise<listResponse> {
await authSystemAdmin({ req });
// Read db
return global.systemModelList.map((model) => ({
type: model.type,
provider: model.provider,
model: model.model,
name: model.name,
avatar: model.avatar,
charsPointsPrice: model.charsPointsPrice,
inputPrice: model.inputPrice,
outputPrice: model.outputPrice,
isActive: model.isActive ?? false,
isCustom: model.isCustom ?? false,
// Tag
contextToken:
'maxContext' in model ? model.maxContext : 'maxToken' in model ? model.maxToken : undefined,
vision: 'vision' in model ? model.vision : undefined,
toolChoice: 'toolChoice' in model ? model.toolChoice : undefined
}));
}
export default NextAPI(handler);
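
For reference, one entry of listResponse assembled by the mapping above might look like the following (illustrative values only; the provider id and numbers are made up). contextToken is taken from maxContext when the model has one, otherwise from maxToken.

// Purely illustrative example entry; field values are not shipped defaults.
const exampleEntry = {
  type: 'llm',
  provider: 'OpenAI',        // assumed provider id
  model: 'gpt-4o-mini',
  name: 'GPT-4o mini',
  avatar: undefined,
  charsPointsPrice: 0,
  isActive: true,
  isCustom: false,
  contextToken: 128000,      // maxContext if present, else maxToken
  vision: true,
  toolChoice: true
};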

View File

@@ -0,0 +1,127 @@
import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next';
import { NextAPI } from '@/service/middleware/entry';
import { authSystemAdmin } from '@fastgpt/service/support/permission/user/auth';
import { findModelFromAlldata, getReRankModel } from '@fastgpt/service/core/ai/model';
import {
EmbeddingModelItemType,
LLMModelItemType,
ReRankModelItemType,
STTModelType,
TTSModelType
} from '@fastgpt/global/core/ai/model.d';
import { getAIApi } from '@fastgpt/service/core/ai/config';
import { addLog } from '@fastgpt/service/common/system/log';
import { getVectorsByText } from '@fastgpt/service/core/ai/embedding';
import { reRankRecall } from '@fastgpt/service/core/ai/rerank';
import { aiTranscriptions } from '@fastgpt/service/core/ai/audio/transcriptions';
import { isProduction } from '@fastgpt/global/common/system/constants';
import * as fs from 'fs';
export type testQuery = { model: string };
export type testBody = {};
export type testResponse = any;
async function handler(
req: ApiRequestProps<testBody, testQuery>,
res: ApiResponseType<any>
): Promise<testResponse> {
await authSystemAdmin({ req });
const { model } = req.query;
const modelData = findModelFromAlldata(model);
if (!modelData) return Promise.reject('Model not found');
if (modelData.type === 'llm') {
return testLLMModel(modelData);
}
if (modelData.type === 'embedding') {
return testEmbeddingModel(modelData);
}
if (modelData.type === 'tts') {
return testTTSModel(modelData);
}
if (modelData.type === 'stt') {
return testSTTModel(modelData);
}
if (modelData.type === 'rerank') {
return testReRankModel(modelData);
}
return Promise.reject('Model type not supported');
}
export default NextAPI(handler);
const testLLMModel = async (model: LLMModelItemType) => {
const ai = getAIApi({});
const response = await ai.chat.completions.create(
{
model: model.model,
messages: [{ role: 'user', content: 'hi' }],
stream: false,
max_tokens: 10
},
{
...(model.requestUrl ? { path: model.requestUrl } : {}),
headers: {
...(model.requestAuth ? { Authorization: `Bearer ${model.requestAuth}` } : {})
}
}
);
const responseText = response.choices?.[0]?.message?.content;
if (!responseText) {
return Promise.reject('Model response empty');
}
addLog.info(`Model test response: ${responseText}`);
};
const testEmbeddingModel = async (model: EmbeddingModelItemType) => {
return getVectorsByText({
input: 'Hi',
model
});
};
const testTTSModel = async (model: TTSModelType) => {
const ai = getAIApi();
await ai.audio.speech.create(
{
model: model.model,
voice: model.voices[0]?.value as any,
input: 'Hi',
response_format: 'mp3',
speed: 1
},
model.requestUrl && model.requestAuth
? {
path: model.requestUrl,
headers: {
Authorization: `Bearer ${model.requestAuth}`
}
}
: {}
);
};
const testSTTModel = async (model: STTModelType) => {
const path = isProduction ? '/app/data/test.mp3' : 'data/test.mp3';
const { text } = await aiTranscriptions({
model: model.model,
fileStream: fs.createReadStream(path)
});
addLog.info(`STT result: ${text}`);
};
const testReRankModel = async (model: ReRankModelItemType) => {
await reRankRecall({
model,
query: 'Hi',
documents: [{ id: '1', text: 'Hi' }]
});
};

View File

@@ -0,0 +1,64 @@
import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next';
import { NextAPI } from '@/service/middleware/entry';
import { authSystemAdmin } from '@fastgpt/service/support/permission/user/auth';
import { MongoSystemModel } from '@fastgpt/service/core/ai/config/schema';
import { findModelFromAlldata } from '@fastgpt/service/core/ai/model';
import { updatedReloadSystemModel } from '@fastgpt/service/core/ai/config/utils';
export type updateQuery = {};
export type updateBody = {
model: string;
metadata?: Record<string, any>;
};
export type updateResponse = {};
async function handler(
req: ApiRequestProps<updateBody, updateQuery>,
res: ApiResponseType<any>
): Promise<updateResponse> {
await authSystemAdmin({ req });
let { model, metadata } = req.body;
if (!model) return Promise.reject(new Error('model is required'));
model = model.trim();
const dbModel = await MongoSystemModel.findOne({ model }).lean();
const modelData = findModelFromAlldata(model);
const metadataConcat: Record<string, any> = {
...modelData, // system config
...dbModel?.metadata, // db config
...metadata // user config
};
delete metadataConcat.avatar;
delete metadataConcat.isCustom;
// Force-assign model so stale metadata cannot overwrite the real model id

metadataConcat.model = model;
metadataConcat.name = metadataConcat?.name?.trim();
// Delete null value
Object.keys(metadataConcat).forEach((key) => {
if (metadataConcat[key] === null || metadataConcat[key] === undefined) {
delete metadataConcat[key];
}
});
await MongoSystemModel.updateOne(
{ model },
{
model,
metadata: metadataConcat
},
{
upsert: true
}
);
await updatedReloadSystemModel();
return {};
}
export default NextAPI(handler);
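
The merge above relies on object-spread precedence: later spreads win, so user-supplied metadata overrides the stored db config, which in turn overrides the built-in system config. A tiny standalone example (hypothetical field values):

// Later spreads win: userMetadata > dbConfig > systemConfig.
const systemConfig = { maxContext: 16000, name: 'demo-model' };
const dbConfig = { name: 'Demo (renamed)' };
const userMetadata = { maxContext: 32000 };

const merged = { ...systemConfig, ...dbConfig, ...userMetadata };
console.log(merged); // { maxContext: 32000, name: 'Demo (renamed)' }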

View File

@@ -0,0 +1,75 @@
import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next';
import { NextAPI } from '@/service/middleware/entry';
import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun';
import { MongoSystemModel } from '@fastgpt/service/core/ai/config/schema';
import { loadSystemModels, updatedReloadSystemModel } from '@fastgpt/service/core/ai/config/utils';
import { updateFastGPTConfigBuffer } from '@fastgpt/service/common/system/config/controller';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
import { authSystemAdmin } from '@fastgpt/service/support/permission/user/auth';
export type updateDefaultQuery = {};
export type updateDefaultBody = {
[ModelTypeEnum.llm]?: string;
[ModelTypeEnum.embedding]?: string;
[ModelTypeEnum.tts]?: string;
[ModelTypeEnum.stt]?: string;
[ModelTypeEnum.rerank]?: string;
};
export type updateDefaultResponse = {};
async function handler(
req: ApiRequestProps<updateDefaultBody, updateDefaultQuery>,
res: ApiResponseType<any>
): Promise<updateDefaultResponse> {
await authSystemAdmin({ req });
const { llm, embedding, tts, stt, rerank } = req.body;
await mongoSessionRun(async (session) => {
await MongoSystemModel.updateMany({}, { $unset: { 'metadata.isDefault': 1 } }, { session });
if (llm) {
await MongoSystemModel.updateOne(
{ model: llm },
{ $set: { 'metadata.isDefault': true } },
{ session }
);
}
if (embedding) {
await MongoSystemModel.updateOne(
{ model: embedding },
{ $set: { 'metadata.isDefault': true } },
{ session }
);
}
if (tts) {
await MongoSystemModel.updateOne(
{ model: tts },
{ $set: { 'metadata.isDefault': true } },
{ session }
);
}
if (stt) {
await MongoSystemModel.updateOne(
{ model: stt },
{ $set: { 'metadata.isDefault': true } },
{ session }
);
}
if (rerank) {
await MongoSystemModel.updateOne(
{ model: rerank },
{ $set: { 'metadata.isDefault': true } },
{ session }
);
}
});
await updatedReloadSystemModel();
return {};
}
export default NextAPI(handler);
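
The request body for this handler simply maps each model type to the model id that should become the default; any type omitted ends up with no default flag after the initial $unset pass. A hypothetical body (the model ids are examples, not shipped defaults):

// Hypothetical updateDefaultBody; keys mirror ModelTypeEnum values.
const body = {
  llm: 'gpt-4o-mini',
  embedding: 'text-embedding-3-small',
  tts: 'tts-1',
  stt: 'whisper-1',
  rerank: 'bge-reranker-v2-m3'
};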

View File

@@ -0,0 +1,62 @@
import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next';
import { NextAPI } from '@/service/middleware/entry';
import { SystemModelSchemaType } from '@fastgpt/service/core/ai/type';
import { authSystemAdmin } from '@fastgpt/service/support/permission/user/auth';
import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun';
import { MongoSystemModel } from '@fastgpt/service/core/ai/config/schema';
import { updatedReloadSystemModel } from '@fastgpt/service/core/ai/config/utils';
export type updateWithJsonQuery = {};
export type updateWithJsonBody = {
config: string;
};
export type updateWithJsonResponse = {};
async function handler(
req: ApiRequestProps<updateWithJsonBody, updateWithJsonQuery>,
res: ApiResponseType<any>
): Promise<updateWithJsonResponse> {
await authSystemAdmin({ req });
const { config } = req.body;
const data = JSON.parse(config) as SystemModelSchemaType[];
// Check
for (const item of data) {
if (!item.model || !item.metadata || typeof item.metadata !== 'object') {
return Promise.reject('Invalid model or metadata');
}
if (!item.metadata.type) {
return Promise.reject(`${item.model} metadata.type is required`);
}
if (!item.metadata.model) {
return Promise.reject(`${item.model} metadata.model is required`);
}
if (!item.metadata.provider) {
return Promise.reject(`${item.model} metadata.provider is required`);
}
item.metadata.model = item.model.trim();
if (!item.metadata.name) {
item.metadata.name = item.model;
}
}
await mongoSessionRun(async (session) => {
await MongoSystemModel.deleteMany({}, { session });
for await (const item of data) {
await MongoSystemModel.updateOne(
{ model: item.model },
{ $set: { model: item.model, metadata: item.metadata } },
{ upsert: true, session }
);
}
});
await updatedReloadSystemModel();
return {};
}
export default NextAPI(handler);
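
The config string is expected to be a JSON array of { model, metadata } entries, mirroring the export produced by the getConfigJson endpoint above; the validation loop requires metadata.type, metadata.model, and metadata.provider, and falls back to the model id when name is missing. A minimal hypothetical payload:

// Hypothetical import payload; the model id and provider are examples only.
const config = JSON.stringify([
  {
    model: 'gpt-4o-mini',
    metadata: {
      type: 'llm',
      model: 'gpt-4o-mini',
      provider: 'OpenAI',   // assumed provider id
      name: 'GPT-4o mini'
    }
  }
]);
// The request body for the endpoint would then be { config }.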

View File

@@ -6,7 +6,7 @@ import { text2Speech } from '@fastgpt/service/core/ai/audio/speech';
import { pushAudioSpeechUsage } from '@/service/support/wallet/usage/push';
import { authChatCrud } from '@/service/support/permission/auth/chat';
import { authType2UsageSource } from '@/service/support/wallet/usage/utils';
import { getAudioSpeechModel } from '@fastgpt/service/core/ai/model';
import { getTTSModel } from '@fastgpt/service/core/ai/model';
import { MongoTTSBuffer } from '@fastgpt/service/common/buffer/tts/schema';
import { ApiRequestProps } from '@fastgpt/service/type/next';
@@ -31,17 +31,19 @@ async function handler(req: ApiRequestProps<GetChatSpeechProps>, res: NextApiRes
...req.body
});
const ttsModel = getAudioSpeechModel(ttsConfig.model);
const ttsModel = getTTSModel(ttsConfig.model);
const voiceData = ttsModel.voices?.find((item) => item.value === ttsConfig.voice);
if (!voiceData) {
throw new Error('voice not found');
}
const bufferId = `${ttsModel.model}-${ttsConfig.voice}`;
/* get audio from buffer */
const ttsBuffer = await MongoTTSBuffer.findOne(
{
bufferId: voiceData.bufferId,
bufferId,
text: JSON.stringify({ text: input, speed: ttsConfig.speed })
},
'buffer'
@@ -70,11 +72,21 @@ async function handler(req: ApiRequestProps<GetChatSpeechProps>, res: NextApiRes
});
/* create buffer */
await MongoTTSBuffer.create({
bufferId: voiceData.bufferId,
text: JSON.stringify({ text: input, speed: ttsConfig.speed }),
buffer
});
await MongoTTSBuffer.create(
{
bufferId,
text: JSON.stringify({ text: input, speed: ttsConfig.speed }),
buffer
},
ttsModel.requestUrl && ttsModel.requestAuth
? {
path: ttsModel.requestUrl,
headers: {
Authorization: `Bearer ${ttsModel.requestAuth}`
}
}
: {}
);
} catch (error) {}
},
onError: (err) => {

View File

@@ -1,6 +1,6 @@
import type { NextApiRequest } from 'next';
import { MongoDataset } from '@fastgpt/service/core/dataset/schema';
import { getVectorModel } from '@fastgpt/service/core/ai/model';
import { getEmbeddingModel } from '@fastgpt/service/core/ai/model';
import type { DatasetSimpleItemType } from '@fastgpt/global/core/dataset/type.d';
import { NextAPI } from '@/service/middleware/entry';
import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant';
@@ -31,7 +31,7 @@ async function handler(req: NextApiRequest): Promise<DatasetSimpleItemType[]> {
_id: item._id,
avatar: item.avatar,
name: item.name,
vectorModel: getVectorModel(item.vectorModel)
vectorModel: getEmbeddingModel(item.vectorModel)
}));
}

View File

@@ -2,7 +2,12 @@ import { MongoDataset } from '@fastgpt/service/core/dataset/schema';
import type { CreateDatasetParams } from '@/global/core/dataset/api.d';
import { authUserPer } from '@fastgpt/service/support/permission/user/auth';
import { DatasetTypeEnum } from '@fastgpt/global/core/dataset/constants';
import { getLLMModel, getVectorModel, getDatasetModel } from '@fastgpt/service/core/ai/model';
import {
getLLMModel,
getEmbeddingModel,
getDatasetModel,
getDefaultEmbeddingModel
} from '@fastgpt/service/core/ai/model';
import { checkTeamDatasetLimit } from '@fastgpt/service/support/permission/teamLimit';
import { WritePermissionVal } from '@fastgpt/global/support/permission/constant';
import { NextAPI } from '@/service/middleware/entry';
@@ -27,7 +32,7 @@ async function handler(
intro,
type = DatasetTypeEnum.dataset,
avatar,
vectorModel = global.vectorModels[0].model,
vectorModel = getDefaultEmbeddingModel().model,
agentModel = getDatasetModel().model,
apiServer,
feishuServer,
@@ -56,7 +61,7 @@ async function handler(
]);
// check model valid
const vectorModelStore = getVectorModel(vectorModel);
const vectorModelStore = getEmbeddingModel(vectorModel);
const agentModelStore = getLLMModel(agentModel);
if (!vectorModelStore || !agentModelStore) {
return Promise.reject(DatasetErrEnum.invalidVectorModelOrQAModel);

View File

@@ -0,0 +1,34 @@
import type { NextApiRequest } from 'next';
import { NextAPI } from '@/service/middleware/entry';
import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant';
import { authDatasetData } from '@fastgpt/service/support/permission/dataset/auth';
import { CollectionWithDatasetType } from '@fastgpt/global/core/dataset/type';
export type GetQuoteDataResponse = {
collection: CollectionWithDatasetType;
q: string;
a: string;
};
async function handler(req: NextApiRequest): Promise<GetQuoteDataResponse> {
const { id: dataId } = req.query as {
id: string;
};
// Credential check
const { datasetData, collection } = await authDatasetData({
req,
authToken: true,
authApiKey: true,
dataId,
per: ReadPermissionVal
});
return {
collection,
q: datasetData.q,
a: datasetData.a
};
}
export default NextAPI(handler);

View File

@@ -4,7 +4,7 @@
*/
import type { NextApiRequest } from 'next';
import { countPromptTokens } from '@fastgpt/service/common/string/tiktoken/index';
import { getVectorModel } from '@fastgpt/service/core/ai/model';
import { getEmbeddingModel } from '@fastgpt/service/core/ai/model';
import { hasSameValue } from '@/service/core/dataset/data/utils';
import { insertData2Dataset } from '@/service/core/dataset/data/controller';
import { authDatasetCollection } from '@fastgpt/service/support/permission/dataset/auth';
@@ -59,7 +59,7 @@ async function handler(req: NextApiRequest) {
// token check
const token = await countPromptTokens(formatQ + formatA, '');
const vectorModelData = getVectorModel(vectorModel);
const vectorModelData = getEmbeddingModel(vectorModel);
if (token > vectorModelData.maxToken) {
return Promise.reject('Q Over Tokens');

View File

@@ -1,4 +1,4 @@
import { getLLMModel, getVectorModel } from '@fastgpt/service/core/ai/model';
import { getLLMModel, getEmbeddingModel } from '@fastgpt/service/core/ai/model';
import { authDataset } from '@fastgpt/service/support/permission/dataset/auth';
import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant';
import { NextAPI } from '@/service/middleware/entry';
@@ -50,7 +50,7 @@ async function handler(req: ApiRequestProps<Query>): Promise<DatasetItemType> {
}
: undefined,
permission,
vectorModel: getVectorModel(dataset.vectorModel),
vectorModel: getEmbeddingModel(dataset.vectorModel),
agentModel: getLLMModel(dataset.agentModel)
};
}

View File

@@ -18,7 +18,7 @@ import { getGroupsByTmbId } from '@fastgpt/service/support/permission/memberGrou
import { concatPer } from '@fastgpt/service/support/permission/controller';
import { getOrgIdSetWithParentByTmbId } from '@fastgpt/service/support/permission/org/controllers';
import { addSourceMember } from '@fastgpt/service/support/user/utils';
import { getVectorModel } from '@fastgpt/service/core/ai/model';
import { getEmbeddingModel } from '@fastgpt/service/core/ai/model';
export type GetDatasetListBody = {
parentId: ParentIdType;
@@ -172,7 +172,7 @@ async function handler(req: ApiRequestProps<GetDatasetListBody>) {
name: dataset.name,
intro: dataset.intro,
type: dataset.type,
vectorModel: getVectorModel(dataset.vectorModel),
vectorModel: getEmbeddingModel(dataset.vectorModel),
inheritPermission: dataset.inheritPermission,
tmbId: dataset.tmbId,
updateTime: dataset.updateTime,

View File

@@ -14,7 +14,7 @@ import {
import { NextAPI } from '@/service/middleware/entry';
import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant';
import { CommonErrEnum } from '@fastgpt/global/common/error/code/common';
import { useReqFrequencyLimit } from '@fastgpt/service/common/middle/reqFrequencyLimit';
import { useIPFrequencyLimit } from '@fastgpt/service/common/middle/reqFrequencyLimit';
async function handler(req: NextApiRequest) {
const {
@@ -100,4 +100,4 @@ async function handler(req: NextApiRequest) {
};
}
export default NextAPI(useReqFrequencyLimit(1, 15), handler);
export default NextAPI(useIPFrequencyLimit({ id: 'search-test', seconds: 1, limit: 15 }), handler);

View File

@@ -6,7 +6,7 @@ import { MongoDatasetData } from '@fastgpt/service/core/dataset/data/schema';
import { MongoDatasetTraining } from '@fastgpt/service/core/dataset/training/schema';
import { createTrainingUsage } from '@fastgpt/service/support/wallet/usage/controller';
import { UsageSourceEnum } from '@fastgpt/global/support/wallet/usage/constants';
import { getLLMModel, getVectorModel } from '@fastgpt/service/core/ai/model';
import { getLLMModel, getEmbeddingModel } from '@fastgpt/service/core/ai/model';
import { TrainingModeEnum } from '@fastgpt/global/core/dataset/constants';
import { ApiRequestProps } from '@fastgpt/service/type/next';
import { OwnerPermissionVal } from '@fastgpt/global/support/permission/constant';
@@ -49,7 +49,7 @@ async function handler(req: ApiRequestProps<rebuildEmbeddingBody>): Promise<Resp
tmbId,
appName: '切换索引模型',
billSource: UsageSourceEnum.training,
vectorModel: getVectorModel(dataset.vectorModel)?.name,
vectorModel: getEmbeddingModel(dataset.vectorModel)?.name,
agentModel: getLLMModel(dataset.agentModel)?.name
});

View File

@@ -18,7 +18,10 @@ async function handler(
// send to pro
const { token } = req.query;
const result = await POST<any>(`support/outLink/dingtalk/${token}`, req.body, {
headers: req.headers as any
headers: {
timestamp: (req.headers.timestamp as string) ?? '',
sign: (req.headers.sign as string) ?? ''
}
});
return result;

View File

@@ -5,7 +5,7 @@ import { getUserDetail } from '@fastgpt/service/support/user/controller';
import type { PostLoginProps } from '@fastgpt/global/support/user/api.d';
import { UserStatusEnum } from '@fastgpt/global/support/user/constant';
import { NextAPI } from '@/service/middleware/entry';
import { useReqFrequencyLimit } from '@fastgpt/service/common/middle/reqFrequencyLimit';
import { useIPFrequencyLimit } from '@fastgpt/service/common/middle/reqFrequencyLimit';
import { pushTrack } from '@fastgpt/service/common/middle/tracks/utils';
import { CommonErrEnum } from '@fastgpt/global/common/error/code/common';
import { UserErrEnum } from '@fastgpt/global/common/error/code/user';
@@ -70,4 +70,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse) {
};
}
export default NextAPI(useReqFrequencyLimit(120, 10, true), handler);
export default NextAPI(
useIPFrequencyLimit({ id: 'login-by-password', seconds: 120, limit: 10, force: true }),
handler
);

View File

@@ -4,14 +4,16 @@ import { getTeamPlanStatus } from '@fastgpt/service/support/wallet/sub/utils';
import { NextAPI } from '@/service/middleware/entry';
async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
const { teamId } = await authCert({
req,
authToken: true
});
try {
const { teamId } = await authCert({
req,
authToken: true
});
return getTeamPlanStatus({
teamId
});
return getTeamPlanStatus({
teamId
});
} catch (error) {}
}
export default NextAPI(handler);

View File

@@ -1,7 +1,7 @@
import type { NextApiRequest, NextApiResponse } from 'next';
import { UsageSourceEnum } from '@fastgpt/global/support/wallet/usage/constants';
import { CreateTrainingUsageProps } from '@fastgpt/global/support/wallet/usage/api.d';
import { getLLMModel, getVectorModel } from '@fastgpt/service/core/ai/model';
import { getLLMModel, getEmbeddingModel } from '@fastgpt/service/core/ai/model';
import { createTrainingUsage } from '@fastgpt/service/support/wallet/usage/controller';
import { authDataset } from '@fastgpt/service/support/permission/dataset/auth';
import { WritePermissionVal } from '@fastgpt/global/support/permission/constant';
@@ -23,7 +23,7 @@ async function handler(req: NextApiRequest) {
tmbId,
appName: name,
billSource: UsageSourceEnum.training,
vectorModel: getVectorModel(dataset.vectorModel).name,
vectorModel: getEmbeddingModel(dataset.vectorModel).name,
agentModel: getLLMModel(dataset.agentModel).name
});

View File

@@ -8,7 +8,8 @@ import { authChatCrud } from '@/service/support/permission/auth/chat';
import { OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat';
import { NextAPI } from '@/service/middleware/entry';
import { aiTranscriptions } from '@fastgpt/service/core/ai/audio/transcriptions';
import { useReqFrequencyLimit } from '@fastgpt/service/common/middle/reqFrequencyLimit';
import { useIPFrequencyLimit } from '@fastgpt/service/common/middle/reqFrequencyLimit';
import { getDefaultSTTModel } from '@fastgpt/service/core/ai/model';
const upload = getUploadModel({
maxSize: 5
@@ -36,7 +37,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
filePaths = [file.path];
if (!global.whisperModel) {
if (!getDefaultSTTModel()) {
throw new Error('whisper model not found');
}
@@ -65,7 +66,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
// }
const result = await aiTranscriptions({
model: global.whisperModel.model,
model: getDefaultSTTModel().model,
fileStream: fs.createReadStream(file.path)
});
@@ -89,7 +90,10 @@ async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
removeFilesByPaths(filePaths);
}
export default NextAPI(useReqFrequencyLimit(1, 1), handler);
export default NextAPI(
useIPFrequencyLimit({ id: 'transcriptions', seconds: 1, limit: 1 }),
handler
);
export const config = {
api: {

View File

@@ -4,7 +4,7 @@ import { pushGenerateVectorUsage } from '@/service/support/wallet/usage/push';
import { getVectorsByText } from '@fastgpt/service/core/ai/embedding';
import { updateApiKeyUsage } from '@fastgpt/service/support/openapi/tools';
import { getUsageSourceByAuthType } from '@fastgpt/global/support/wallet/usage/tools';
import { getVectorModel } from '@fastgpt/service/core/ai/model';
import { getEmbeddingModel } from '@fastgpt/service/core/ai/model';
import { checkTeamAIPoints } from '@fastgpt/service/support/permission/teamLimit';
import { EmbeddingTypeEnm } from '@fastgpt/global/core/ai/constants';
import { NextAPI } from '@/service/middleware/entry';
@@ -36,7 +36,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
const { tokens, vectors } = await getVectorsByText({
input: query,
model: getVectorModel(model),
model: getEmbeddingModel(model),
type
});