perf: max_token count; feat: support reasoner output; fix: member scroll (#3681)

* perf: supplement assistant empty response

* check array

* perf: max_token count

* feat: support reasoner output

* member scroll

* update provider order

* i18n
This commit is contained in:
Archer
2025-02-01 18:04:44 +08:00
committed by archer
parent 9e0379382f
commit 54defd8a3c
46 changed files with 462 additions and 266 deletions

View File

@@ -2,33 +2,23 @@ import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
 import {
   ChatCompletionCreateParamsNonStreaming,
   ChatCompletionCreateParamsStreaming,
-  ChatCompletionMessageParam,
   StreamChatType
 } from '@fastgpt/global/core/ai/type';
-import { countGptMessagesTokens } from '../../common/string/tiktoken';
 import { getLLMModel } from './model';
-export const computedMaxToken = async ({
+/*
+  Count response max token
+*/
+export const computedMaxToken = ({
   maxToken,
-  model,
-  filterMessages = []
+  model
 }: {
   maxToken?: number;
   model: LLMModelItemType;
-  filterMessages: ChatCompletionMessageParam[];
 }) => {
   if (maxToken === undefined) return;
   maxToken = Math.min(maxToken, model.maxResponse);
-  const tokensLimit = model.maxContext;
-  /* count response max token */
-  const promptsToken = await countGptMessagesTokens(filterMessages);
-  maxToken = promptsToken + maxToken > tokensLimit ? tokensLimit - promptsToken : maxToken;
-  if (maxToken <= 0) {
-    maxToken = 200;
-  }
   return maxToken;
 };
@@ -40,6 +30,7 @@ export const computedTemperature = ({
   model: LLMModelItemType;
   temperature: number;
 }) => {
+  if (typeof model.maxTemperature !== 'number') return undefined;
   temperature = +(model.maxTemperature * (temperature / 10)).toFixed(2);
   temperature = Math.max(temperature, 0.01);