V4.8.20 feature (#3686)

* Aiproxy (#3649)

* model config

* feat: model config ui

* perf: rename variable

* feat: custom request url

* perf: model buffer

* perf: init model

* feat: json model config

* auto login

* fix: ts

* update packages

* package

* fix: dockerfile

* feat: usage filter & export & dashboard (#3538)

* feat: usage filter & export & dashboard

* adjust ui

* fix tmb scroll

* fix code & select all

* merge

* perf: usages list;perf: move components (#3654)

* perf: usages list

* team sub plan load

* perf: usage dashboard code

* perf: dashboard ui

* perf: move components

* add default model config (#3653)

* 4.8.20 test (#3656)

* provider

* perf: model config

* model perf (#3657)

* fix: model

* dataset quote

* perf: model config

* model tag

* doubao model config

* perf: config model

* feat: model test

* fix: POST 500 error on dingtalk bot (#3655)

* feat: default model (#3662)

* move model config

* feat: default model

* fix: falsely triggered org selection (#3661)

* export usage csv i18n (#3660)

* export usage csv i18n

* fix build

* feat: markdown extension (#3663)

* feat: markdown extension

* media cors

* rerank test

* default price

* perf: default model

* fix: cannot custom provider

* fix: default model select

* update bg

* perf: default model selector

* fix: usage export

* i18n

* fix: rerank

* update init extension

* perf: ip limit check

* doubao model order

* web default model

* perf: tts selector

* perf: tts error

* qrcode package

* reload buffer (#3665)

* reload buffer

* reload buffer

* tts selector

* fix: err tip (#3666)

* fix: err tip

* perf: training queue

* doc

* fix interactive edge (#3659)

* fix interactive edge

* fix

* comment

* add gemini model

* fix: chat model select

* perf: supplement assistant empty response (#3669)

* perf: supplement assistant empty response

* check array

* perf: max_token count;feat: support reasoner output;fix: member scroll (#3681)

* perf: supplement assistant empty response

* check array

* perf: max_token count

* feat: support reasoner output

* member scroll

* update provider order

* i18n

* fix: stream response (#3682)

* perf: supplement assistant empty response

* check array

* fix: stream response

* fix: model config cannot set to null

* fix: reasoning response (#3684)

* perf: supplement assistant empty response

* check array

* fix: reasoning response

* fix: reasoning response

* doc (#3685)

* perf: supplement assistant empty response

* check array

* doc

* lock

* animation

* update doc

* update compose

* doc

* doc

---------

Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
This commit is contained in:
Archer
2025-02-05 00:10:47 +08:00
committed by GitHub
parent c393002f1d
commit db2c0a0bdb
496 changed files with 9031 additions and 4726 deletions

View File

@@ -4,17 +4,18 @@ import { immer } from 'zustand/middleware/immer';
import axios from 'axios';
import { OAuthEnum } from '@fastgpt/global/support/user/constant';
import type {
AudioSpeechModelType,
TTSModelType,
LLMModelItemType,
ReRankModelItemType,
VectorModelItemType,
EmbeddingModelItemType,
STTModelType
} from '@fastgpt/global/core/ai/model.d';
import { InitDateResponse } from '@/global/common/api/systemRes';
import { FastGPTFeConfigsType } from '@fastgpt/global/common/system/types';
import { SubPlanType } from '@fastgpt/global/support/wallet/sub/type';
import { defaultWhisperModel } from '@fastgpt/global/core/ai/model';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
import { TeamErrEnum } from '@fastgpt/global/common/error/code/team';
import { SystemDefaultModelType } from '@fastgpt/service/core/ai/type';
type LoginStoreType = { provider: `${OAuthEnum}`; lastRoute: string; state: string };
@@ -49,12 +50,13 @@ type State = {
feConfigs: FastGPTFeConfigsType;
subPlans?: SubPlanType;
systemVersion: string;
defaultModels: SystemDefaultModelType;
llmModelList: LLMModelItemType[];
datasetModelList: LLMModelItemType[];
vectorModelList: VectorModelItemType[];
audioSpeechModelList: AudioSpeechModelType[];
embeddingModelList: EmbeddingModelItemType[];
ttsModelList: TTSModelType[];
reRankModelList: ReRankModelItemType[];
whisperModel: STTModelType;
sttModelList: STTModelType[];
initStaticData: (e: InitDateResponse) => void;
appType?: string;
setAppType: (e?: string) => void;
@@ -125,12 +127,13 @@ export const useSystemStore = create<State>()(
feConfigs: {},
subPlans: undefined,
systemVersion: '0.0.0',
defaultModels: {},
llmModelList: [],
datasetModelList: [],
vectorModelList: [],
audioSpeechModelList: [],
embeddingModelList: [],
ttsModelList: [],
reRankModelList: [],
whisperModel: defaultWhisperModel,
sttModelList: [],
initStaticData(res) {
set((state) => {
state.initDataBufferId = res.bufferId;
@@ -139,12 +142,24 @@ export const useSystemStore = create<State>()(
state.subPlans = res.subPlans ?? state.subPlans;
state.systemVersion = res.systemVersion ?? state.systemVersion;
state.llmModelList = res.llmModels ?? state.llmModelList;
state.llmModelList =
res.activeModelList?.filter((item) => item.type === ModelTypeEnum.llm) ??
state.llmModelList;
state.datasetModelList = state.llmModelList.filter((item) => item.datasetProcess);
state.vectorModelList = res.vectorModels ?? state.vectorModelList;
state.audioSpeechModelList = res.audioSpeechModels ?? state.audioSpeechModelList;
state.reRankModelList = res.reRankModels ?? state.reRankModelList;
state.whisperModel = res.whisperModel ?? state.whisperModel;
state.embeddingModelList =
res.activeModelList?.filter((item) => item.type === ModelTypeEnum.embedding) ??
state.embeddingModelList;
state.ttsModelList =
res.activeModelList?.filter((item) => item.type === ModelTypeEnum.tts) ??
state.ttsModelList;
state.reRankModelList =
res.activeModelList?.filter((item) => item.type === ModelTypeEnum.rerank) ??
state.reRankModelList;
state.sttModelList =
res.activeModelList?.filter((item) => item.type === ModelTypeEnum.stt) ??
state.sttModelList;
state.defaultModels = res.defaultModels ?? state.defaultModels;
});
}
})),
@@ -156,12 +171,13 @@ export const useSystemStore = create<State>()(
feConfigs: state.feConfigs,
subPlans: state.subPlans,
systemVersion: state.systemVersion,
defaultModels: state.defaultModels,
llmModelList: state.llmModelList,
datasetModelList: state.datasetModelList,
vectorModelList: state.vectorModelList,
audioSpeechModelList: state.audioSpeechModelList,
embeddingModelList: state.embeddingModelList,
ttsModelList: state.ttsModelList,
reRankModelList: state.reRankModelList,
whisperModel: state.whisperModel
sttModelList: state.sttModelList
})
}
)

View File

@@ -1,15 +1,58 @@
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { useSystemStore } from './useSystemStore';
export const downloadFetch = async ({ url, filename }: { url: string; filename: string }) => {
const a = document.createElement('a');
a.href = url;
a.download = filename;
document.body.appendChild(a);
a.click();
document.body.removeChild(a);
/**
 * Trigger a browser file download.
 *
 * Without `body`, the browser navigates the anchor straight at `url` (GET).
 * With `body`, the data is fetched via POST as JSON, turned into a blob URL,
 * and downloaded from that temporary object URL.
 *
 * @param url      - Endpoint or direct file URL to download from.
 * @param filename - Suggested name for the saved file.
 * @param body     - Optional JSON payload; its presence switches to the POST path.
 */
export const downloadFetch = async ({
  url,
  filename,
  body
}: {
  url: string;
  filename: string;
  body?: Record<string, any>;
}) => {
  // Create a temporary <a> element and simulate a click to start the download.
  const triggerDownload = (href: string) => {
    const anchor = document.createElement('a');
    anchor.href = href;
    anchor.download = filename;
    document.body.appendChild(anchor);
    anchor.click();
    document.body.removeChild(anchor);
  };

  if (!body) {
    // Simple case: let the browser fetch the url directly.
    triggerDownload(url);
    return;
  }

  // fetch data with POST method if body exists
  const response = await fetch(url, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json'
    },
    body: JSON.stringify(body)
  });
  const blob = await response.blob();
  const objectUrl = window.URL.createObjectURL(blob);
  triggerDownload(objectUrl);
  // clean up the blob URL
  window.URL.revokeObjectURL(objectUrl);
};
/**
 * Resolve an LLM model item from the client-side model list.
 *
 * Matches either the model id or its display name. Falls back to the
 * configured default LLM model when no entry matches.
 *
 * @param model - Model id or display name to look up; may be undefined.
 * @returns The matching model item, or the default LLM model.
 *
 * NOTE(review): `defaultModels.llm!` assumes a default LLM is always
 * configured by system init — confirm upstream guarantees this, otherwise
 * callers can receive undefined at runtime despite the non-null assertion.
 */
export const getWebLLMModel = (model?: string) => {
  const list = useSystemStore.getState().llmModelList;
  const defaultModels = useSystemStore.getState().defaultModels;
  // The source span contained a leftover duplicate `return list.find(...) ?? list[0];`
  // (unreachable diff residue); only the default-model fallback version is kept.
  return list.find((item) => item.model === model || item.name === model) ?? defaultModels.llm!;
};
/**
 * Pick the default LLM model for the web client.
 *
 * Prefers the system-configured default model when it appears in the
 * candidate list; otherwise falls back to the first candidate.
 *
 * @param llmList - Optional explicit candidate list; when empty, the
 *                  store's full `llmModelList` is used instead.
 * @returns The preferred default model, or the first candidate.
 */
export const getWebDefaultModel = (llmList: LLMModelItemType[] = []) => {
  const { llmModelList, defaultModels } = useSystemStore.getState();
  const candidates = llmList.length > 0 ? llmList : llmModelList;
  const preferred = defaultModels.llm;
  if (preferred && candidates.some((item) => item.model === preferred.model)) {
    return preferred;
  }
  return candidates[0];
};